Statistics
| Branch: | Revision:

root / exec.c @ 7296abac

History | View | Annotate | Download (117.3 kB)

1 54936004 bellard
/*
2 fd6ce8f6 bellard
 *  virtual page mapping and translated block handling
3 5fafdf24 ths
 *
4 54936004 bellard
 *  Copyright (c) 2003 Fabrice Bellard
5 54936004 bellard
 *
6 54936004 bellard
 * This library is free software; you can redistribute it and/or
7 54936004 bellard
 * modify it under the terms of the GNU Lesser General Public
8 54936004 bellard
 * License as published by the Free Software Foundation; either
9 54936004 bellard
 * version 2 of the License, or (at your option) any later version.
10 54936004 bellard
 *
11 54936004 bellard
 * This library is distributed in the hope that it will be useful,
12 54936004 bellard
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 54936004 bellard
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 54936004 bellard
 * Lesser General Public License for more details.
15 54936004 bellard
 *
16 54936004 bellard
 * You should have received a copy of the GNU Lesser General Public
17 8167ee88 Blue Swirl
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 54936004 bellard
 */
19 67b915a5 bellard
#include "config.h"
20 d5a8f07c bellard
#ifdef _WIN32
21 d5a8f07c bellard
#include <windows.h>
22 d5a8f07c bellard
#else
23 a98d49b1 bellard
#include <sys/types.h>
24 d5a8f07c bellard
#include <sys/mman.h>
25 d5a8f07c bellard
#endif
26 54936004 bellard
#include <stdlib.h>
27 54936004 bellard
#include <stdio.h>
28 54936004 bellard
#include <stdarg.h>
29 54936004 bellard
#include <string.h>
30 54936004 bellard
#include <errno.h>
31 54936004 bellard
#include <unistd.h>
32 54936004 bellard
#include <inttypes.h>
33 54936004 bellard
34 6180a181 bellard
#include "cpu.h"
35 6180a181 bellard
#include "exec-all.h"
36 ca10f867 aurel32
#include "qemu-common.h"
37 b67d9a52 bellard
#include "tcg.h"
38 b3c7724c pbrook
#include "hw/hw.h"
39 74576198 aliguori
#include "osdep.h"
40 7ba1e619 aliguori
#include "kvm.h"
41 53a5960a pbrook
#if defined(CONFIG_USER_ONLY)
42 53a5960a pbrook
#include <qemu.h>
43 fd052bf6 Riku Voipio
#include <signal.h>
44 53a5960a pbrook
#endif
45 54936004 bellard
46 fd6ce8f6 bellard
/* Per-subsystem debug switches; uncomment to enable the matching
   debug output in this file. */
//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* Number of code-page lookups after which a self-modifying-code
   bitmap is built for the page (see PageDesc.code_write_count). */
#define SMC_BITMAP_USE_THRESHOLD 10

/* Array of TranslationBlock descriptors; allocated in code_gen_alloc(). */
static TranslationBlock *tbs;
int code_gen_max_blocks;
/* Hash table from physical PC to TranslationBlock chains. */
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
/* Number of entries of 'tbs' in use (updated by translation code
   outside this excerpt). */
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
71 fd6ce8f6 bellard
72 141ac468 blueswir1
/* Placement/alignment attributes for the TCG prologue buffer below. */
#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

/* Entry trampoline into TCG-generated code; made executable in
   code_gen_alloc(). */
uint8_t code_gen_prologue[1024] code_gen_section;
/* Buffer that receives generated host code (set up in code_gen_alloc). */
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
/* Current emission position inside code_gen_buffer. */
uint8_t *code_gen_ptr;
94 fd6ce8f6 bellard
95 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
/* File descriptor backing guest RAM (system mode only). */
int phys_ram_fd;
/* Dirty-state array for guest RAM; presumably one byte per RAM page —
   confirm at the use sites later in the file. */
uint8_t *phys_ram_dirty;
static int in_migration;

/* One contiguous chunk of host memory registered as guest RAM. */
typedef struct RAMBlock {
    uint8_t *host;          /* host virtual address of the block */
    ram_addr_t offset;      /* starting ram address of the block */
    ram_addr_t length;      /* size of the block in bytes */
    struct RAMBlock *next;  /* singly-linked list of all blocks */
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif
113 9fa3e853 bellard
114 6a00d601 bellard
/* Head of the singly-linked list of all CPUs (appended to by
   cpu_exec_init()). */
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;
125 6a00d601 bellard
126 54936004 bellard
/* Per-guest-page bookkeeping for the translation cache. */
typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    /* PAGE_* flags for this page (set via page_set_flags). */
    unsigned long flags;
#endif
} PageDesc;
137 54936004 bellard
138 41c1b1c9 Paul Brook
/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

/* Right-shift that extracts the top-level (L1) index from an address. */
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

/* Host page geometry, filled in by page_init(). */
unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];
187 54936004 bellard
188 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
/* Per-physical-page descriptor. */
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
/* Per-io-index handler tables; the second index selects the access
   width (presumably byte/word/long — confirm at registration sites). */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
/* Which io_mem slots are allocated. */
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif
208 33417e70 bellard
209 34865134 bellard
/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
/* NOTE(review): presumably selects append vs truncate when the log
   file is opened — confirm at the fopen call site. */
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;
225 e3db7226 bellard
226 7cb69cae bellard
#ifdef _WIN32
/* Make the memory range [addr, addr + size) executable (and still
   readable/writable).  The VirtualProtect result is ignored, matching
   the POSIX variant below. */
static void map_exec(void *addr, long size)
{
    DWORD old_protect;

    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
/* Make the memory range [addr, addr + size) executable by rounding it
   out to whole host pages and mprotect()ing them RWX. */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    unsigned long mask = page_size - 1;
    unsigned long first = (unsigned long)addr & ~mask;
    unsigned long last = ((unsigned long)addr + size + mask) & ~mask;

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
251 7cb69cae bellard
252 b346ff46 bellard
/* Initialize the host-page geometry globals (qemu_host_page_size/bits/
   mask); additionally, in non-Win32 user mode, parse /proc/self/maps and
   mark every already-mapped host region as PAGE_RESERVED so the guest
   cannot map over it. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* Host page size defaults to the real host page size but is never
       allowed to be smaller than the target page size. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                /* Each maps line starts with "start-end ..."; anything
                   after the range is discarded. */
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        /* Region extends past the guest address space:
                           reserve up to the top of it. */
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
    }
#endif
}
309 54936004 bellard
310 41c1b1c9 Paul Brook
/* Return the PageDesc for page 'index', walking the multi-level l1_map.
   If 'alloc' is nonzero, missing intermediate tables and the leaf
   PageDesc array are allocated; otherwise NULL is returned when any
   level is missing. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex.
       Neither can we record the new pages we reserve while allocating a
       given page because that may recurse into an unallocated page table
       entry.  Stuff the allocations we do make into a queue and process
       them after having completed one entire page table allocation.  */

    /* At most one allocation per table level; two slots (addr, len)
       per allocation. */
    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
    int reserve_idx = 0;

# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
        if (h2g_valid(P)) {                             \
            reserve[reserve_idx] = h2g(P);              \
            reserve[reserve_idx + 1] = SIZE;            \
            reserve_idx += 2;                           \
        }                                               \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: array of PageDesc. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC
#if defined(CONFIG_USER_ONLY)
    /* Now that the table walk is complete it is safe to record the
       pages reserved by the ALLOC calls above. */
    for (i = 0; i < reserve_idx; i += 2) {
        unsigned long addr = reserve[i];
        unsigned long len = reserve[i + 1];

        page_set_flags(addr & TARGET_PAGE_MASK,
                       TARGET_PAGE_ALIGN(addr + len),
                       PAGE_RESERVED);
    }
#endif

    return pd + (index & (L2_SIZE - 1));
}
382 54936004 bellard
383 41c1b1c9 Paul Brook
/* Non-allocating lookup: return the PageDesc for 'index', or NULL when
   no descriptor has been created for that page yet. */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    PageDesc *pd = page_find_alloc(index, 0);

    return pd;
}
387 fd6ce8f6 bellard
388 6d9a1304 Paul Brook
#if !defined(CONFIG_USER_ONLY)
389 c227f099 Anthony Liguori
/* Return the PhysPageDesc for physical page 'index', walking the
   multi-level l1_phys_map.  If 'alloc' is nonzero, missing levels are
   allocated and new leaf entries are initialized to IO_MEM_UNASSIGNED;
   otherwise NULL is returned when any level is missing. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: array of PhysPageDesc. */
    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        /* Fresh entries are unassigned I/O; region_offset mirrors the
           page's physical address until remapped. */
        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
428 92e873b9 bellard
429 c227f099 Anthony Liguori
/* Non-allocating lookup: return the PhysPageDesc for 'index', or NULL
   when the physical page has no descriptor yet. */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *pd = phys_page_find_alloc(index, 0);

    return pd;
}
433 92e873b9 bellard
434 c227f099 Anthony Liguori
/* Forward declarations for TLB code-protection helpers (definitions not
   in this excerpt). */
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
/* In system mode there is no guest mmap layer to protect, so the mmap
   lock is a no-op here (user mode provides real implementations). */
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif
440 fd6ce8f6 bellard
441 4369415f bellard
/* Default size of the buffer that receives TCG-generated host code. */
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
/* Statically allocated code buffer, used instead of mmap()/malloc() in
   code_gen_alloc(). */
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif
452 4369415f bellard
453 8fcd3692 blueswir1
/* Allocate the translated-code buffer and make it executable.  With
   USE_STATIC_CODE_GEN_BUFFER the static array is used; otherwise the
   buffer is mmap()ed (Linux/BSD, honoring per-host placement limits so
   direct calls/branches can reach it) or qemu_malloc()ed.  Also makes
   the prologue executable and sizes the TB descriptor array. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__) 
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Leave room for one maximally-sized block at the end of the buffer
       (flush threshold). */
    code_gen_buffer_max_size = code_gen_buffer_size - 
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
539 26a5f13b bellard
540 26a5f13b bellard
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    /* Code emission starts at the base of the freshly allocated buffer. */
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}
553 26a5f13b bellard
554 9656f324 pbrook
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
555 9656f324 pbrook
556 e59fb374 Juan Quintela
static int cpu_common_post_load(void *opaque, int version_id)
557 e7f4eff7 Juan Quintela
{
558 e7f4eff7 Juan Quintela
    CPUState *env = opaque;
559 9656f324 pbrook
560 3098dba0 aurel32
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
561 3098dba0 aurel32
       version_id is increased. */
562 3098dba0 aurel32
    env->interrupt_request &= ~0x01;
563 9656f324 pbrook
    tlb_flush(env, 1);
564 9656f324 pbrook
565 9656f324 pbrook
    return 0;
566 9656f324 pbrook
}
567 e7f4eff7 Juan Quintela
568 e7f4eff7 Juan Quintela
/* Migration description for the state common to all CPU models: only
   'halted' and 'interrupt_request' are transferred; cpu_common_post_load
   fixes up the loaded state. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
580 9656f324 pbrook
#endif
581 9656f324 pbrook
582 950f1472 Glauber Costa
CPUState *qemu_get_cpu(int cpu)
583 950f1472 Glauber Costa
{
584 950f1472 Glauber Costa
    CPUState *env = first_cpu;
585 950f1472 Glauber Costa
586 950f1472 Glauber Costa
    while (env) {
587 950f1472 Glauber Costa
        if (env->cpu_index == cpu)
588 950f1472 Glauber Costa
            break;
589 950f1472 Glauber Costa
        env = env->next_cpu;
590 950f1472 Glauber Costa
    }
591 950f1472 Glauber Costa
592 950f1472 Glauber Costa
    return env;
593 950f1472 Glauber Costa
}
594 950f1472 Glauber Costa
595 6a00d601 bellard
/* Register 'env' on the global CPU list, assigning it the next free
   cpu_index, and (in system mode with savevm support) register its
   migration state. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    /* Append at the tail of the first_cpu list; cpu_index is the
       position reached while walking to the tail. */
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
624 fd6ce8f6 bellard
625 9fa3e853 bellard
/* Drop the self-modifying-code bitmap of @p (if one was built) and
   reset its write counter, so the bitmap is rebuilt lazily on the
   next qualifying code write. */
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;   /* avoid dangling pointer / double free */
    }
    p->code_write_count = 0;
}
633 9fa3e853 bellard
634 5cd2c5b6 Richard Henderson
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

/* Recursive helper: walk one node of the page-table radix tree.
   @level 0 means *lp is a leaf array of PageDescs; otherwise it is
   an array of child-table pointers.  NULL subtrees are skipped. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        /* leaf: clear the TB list head and SMC bitmap of each page */
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        /* interior node: recurse into each child table */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}
656 5cd2c5b6 Richard Henderson
657 5cd2c5b6 Richard Henderson
static void page_flush_tb(void)
658 5cd2c5b6 Richard Henderson
{
659 5cd2c5b6 Richard Henderson
    int i;
660 5cd2c5b6 Richard Henderson
    for (i = 0; i < V_L1_SIZE; i++) {
661 5cd2c5b6 Richard Henderson
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
662 fd6ce8f6 bellard
    }
663 fd6ce8f6 bellard
}
664 fd6ce8f6 bellard
665 fd6ce8f6 bellard
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* Sanity check: generated code must never have outgrown the buffer. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Clear every CPU's direct TB-lookup cache. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Clear the physical-PC hash table and all per-page TB lists. */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    /* Rewind the code buffer so translation restarts from scratch. */
    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
693 fd6ce8f6 bellard
694 fd6ce8f6 bellard
#ifdef DEBUG_TB_CHECK
695 fd6ce8f6 bellard
696 bc98a7ef j_mayer
/* Debug check: after invalidating the page containing @address, report
   any TB in the physical hash table that still intersects that page. */
static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* overlap test between [pc, pc+size) and the target page */
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}
712 fd6ce8f6 bellard
713 fd6ce8f6 bellard
/* verify that all the pages have correct rights for code */

/* Debug check: every page spanned by a translated TB must have had
   PAGE_WRITE removed; report any TB whose first or last byte lies on
   a still-writable page. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
730 fd6ce8f6 bellard
731 fd6ce8f6 bellard
#endif
732 fd6ce8f6 bellard
733 fd6ce8f6 bellard
/* invalidate one TB */
734 fd6ce8f6 bellard
/* Unlink @tb from a singly linked TB list headed at *ptb.  The list's
   "next" pointer lives at byte offset @next_offset inside each TB, so
   the same routine serves any of the intrusive TB lists.  @tb must be
   present in the list. */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    while (1) {
        TranslationBlock *cur = *ptb;
        if (cur == tb) {
            /* splice out: copy tb's own next pointer into *ptb */
            *ptb = *(TranslationBlock **)((char *)cur + next_offset);
            return;
        }
        ptb = (TranslationBlock **)((char *)cur + next_offset);
    }
}
747 fd6ce8f6 bellard
748 9fa3e853 bellard
/* Unlink @tb from the per-page TB list headed at *ptb.  Pointers on
   this list carry the page index (0 or 1) of the *next* TB in their
   two low bits, selecting which page_next[] slot continues the list. */
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;                          /* tag: page index */
        tb1 = (TranslationBlock *)((long)tb1 & ~3);  /* strip tag */
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
764 9fa3e853 bellard
765 d4e8164f bellard
/* Remove @tb's jump slot @n from the circular list of TBs that jump
   into the same destination TB.  List pointers are tagged in their two
   low bits: 0/1 = which jmp_next[] slot continues the list, 2 = the
   destination TB's own jmp_first head. */
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;    /* reached the list head */
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
792 d4e8164f bellard
793 d4e8164f bellard
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* Repoint the patched jump back at the TB's own code for slot n. */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
799 d4e8164f bellard
800 41c1b1c9 Paul Brook
/* Remove @tb from every data structure that references it: the
   physical-PC hash table, the per-page TB lists (except the page at
   @page_addr, whose list the caller is iterating; pass -1 to unlink
   from both pages), the per-CPU lookup caches, and the jump chains.
   Jumps from other TBs into @tb are unchained and reset. */
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)    /* tag 2 marks the end of the circular list */
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
855 9fa3e853 bellard
856 9fa3e853 bellard
/* Set @len consecutive bits starting at bit index @start in the
   bitmap @tab (bit i lives in byte i>>3, position i&7).
   Handles ranges of any length, spanning any number of bytes. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    int first_mask = 0xff << (start & 7);   /* bits >= start within a byte */

    tab += start >> 3;
    if ((start & ~7) == (end & ~7)) {
        /* start and end fall inside the same byte */
        if (start < end) {
            *tab |= first_mask & ~(0xff << (end & 7));
        }
    } else {
        int end1;

        /* head: partial (or full) first byte */
        *tab++ |= first_mask;
        start = (start + 8) & ~7;
        /* middle: whole bytes */
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        /* tail: partial last byte, if any */
        if (start < end) {
            *tab |= ~(0xff << (end & 7));
        }
    }
}
882 9fa3e853 bellard
883 9fa3e853 bellard
/* Build the self-modifying-code bitmap of page @p: one bit per byte of
   the page, set where translated code exists.  Walks the page's TB
   list; low tag bits of each list pointer say whether this page is the
   TB's first (n == 0) or second (n == 1) page. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page: TB covers from page start to its end offset */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
910 9fa3e853 bellard
911 2e70f6ef pbrook
/* Translate one guest code block starting at @pc (with translation
   context @cs_base/@flags/@cflags) and link the resulting TB into the
   physical page tables.  If the TB pool is exhausted, everything is
   flushed and translation retried; tb_invalidated_flag is set so
   callers drop stale TB pointers. */
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* bump the code pointer, rounded up to CODE_GEN_ALIGN */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        /* TB spills onto a second page; record it too */
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
948 3b46e624 ths
949 9fa3e853 bellard
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    /* After enough write faults on this page, build the per-byte code
       bitmap so future small writes can skip invalidation. */
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;    /* tag: which of the TB's pages this is */
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];   /* fetch before tb is unlinked */
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);   /* does not return */
    }
#endif
}
1059 fd6ce8f6 bellard
1060 9fa3e853 bellard
/* len must be <= 8 and start must be a multiple of len */

/* Fast path for small guest writes: if the page's code bitmap shows no
   translated code under [start, start+len), do nothing; otherwise fall
   back to the full range invalidation. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        /* len <= 8 guarantees the tested bits fit in one shifted byte */
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1086 9fa3e853 bellard
1087 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1088 41c1b1c9 Paul Brook
/* Invalidate every TB on the page containing @addr (user-mode, non
   softmmu path, typically from a write-protection fault).  @pc/@puc
   describe the faulting host context for precise SMC handling; pc == 0
   means the faulting TB is unknown. */
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;    /* tag: which of the TB's pages this is */
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);   /* does not return */
    }
#endif
}
1146 9fa3e853 bellard
#endif
1147 fd6ce8f6 bellard
1148 fd6ce8f6 bellard
/* add the tb in the target page and protect it if necessary */

/* Record that page @n (0 or 1) of @tb lives at @page_addr: push the TB
   on the page's list (tagging the pointer with @n) and write-protect
   the page so guest writes to translated code are caught. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;   /* non-NULL => page was already protected */
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages; accumulate their
           flags and strip PAGE_WRITE from each */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1202 fd6ce8f6 bellard
1203 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    /* NULL return means the pool or code buffer is exhausted; the
       caller is expected to tb_flush() and retry. */
    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
1217 d4e8164f bellard
1218 2e70f6ef pbrook
/* Release a translation block.  In practice this is mostly used for
   single-use temporary TBs: space is reclaimed only when 'tb' happens
   to be the most recently generated one; otherwise it is left in
   place and recovered by the next full flush. */
void tb_free(TranslationBlock *tb)
{
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
1228 2e70f6ef pbrook
1229 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1230 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1231 41c1b1c9 Paul Brook
void tb_link_page(TranslationBlock *tb,
1232 41c1b1c9 Paul Brook
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1233 d4e8164f bellard
{
1234 9fa3e853 bellard
    unsigned int h;
1235 9fa3e853 bellard
    TranslationBlock **ptb;
1236 9fa3e853 bellard
1237 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1238 c8a706fe pbrook
       before we are done.  */
1239 c8a706fe pbrook
    mmap_lock();
1240 9fa3e853 bellard
    /* add in the physical hash table */
1241 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1242 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1243 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1244 9fa3e853 bellard
    *ptb = tb;
1245 fd6ce8f6 bellard
1246 fd6ce8f6 bellard
    /* add in the page list */
1247 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1248 9fa3e853 bellard
    if (phys_page2 != -1)
1249 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1250 9fa3e853 bellard
    else
1251 9fa3e853 bellard
        tb->page_addr[1] = -1;
1252 9fa3e853 bellard
1253 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1254 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1255 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1256 d4e8164f bellard
1257 d4e8164f bellard
    /* init original jump addresses */
1258 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1259 d4e8164f bellard
        tb_reset_jump(tb, 0);
1260 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1261 d4e8164f bellard
        tb_reset_jump(tb, 1);
1262 8a40a180 bellard
1263 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1264 8a40a180 bellard
    tb_page_check();
1265 8a40a180 bellard
#endif
1266 c8a706fe pbrook
    mmap_unlock();
1267 fd6ce8f6 bellard
}
1268 fd6ce8f6 bellard
1269 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1270 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1271 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1272 fd6ce8f6 bellard
{
1273 9fa3e853 bellard
    int m_min, m_max, m;
1274 9fa3e853 bellard
    unsigned long v;
1275 9fa3e853 bellard
    TranslationBlock *tb;
1276 a513fe19 bellard
1277 a513fe19 bellard
    if (nb_tbs <= 0)
1278 a513fe19 bellard
        return NULL;
1279 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1280 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1281 a513fe19 bellard
        return NULL;
1282 a513fe19 bellard
    /* binary search (cf Knuth) */
1283 a513fe19 bellard
    m_min = 0;
1284 a513fe19 bellard
    m_max = nb_tbs - 1;
1285 a513fe19 bellard
    while (m_min <= m_max) {
1286 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1287 a513fe19 bellard
        tb = &tbs[m];
1288 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1289 a513fe19 bellard
        if (v == tc_ptr)
1290 a513fe19 bellard
            return tb;
1291 a513fe19 bellard
        else if (tc_ptr < v) {
1292 a513fe19 bellard
            m_max = m - 1;
1293 a513fe19 bellard
        } else {
1294 a513fe19 bellard
            m_min = m + 1;
1295 a513fe19 bellard
        }
1296 5fafdf24 ths
    }
1297 a513fe19 bellard
    return &tbs[m_max];
1298 a513fe19 bellard
}
1299 7501267e bellard
1300 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);

/* Detach direct jump 'n' of 'tb' and recursively reset the jumps of
   the TB it pointed to.  Jump-list pointers carry a tag in their two
   low bits: 0/1 select which jmp_next slot of the owning TB, 2 marks
   the list head (the destination TB itself). */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *cur, *dest, **link;
    unsigned int tag;

    cur = tb->jmp_next[n];
    if (cur == NULL) {
        return;
    }
    /* Walk the circular list until the head (tag == 2): that head is
       the TB that 'tb' jumps to. */
    for (;;) {
        tag = (long)cur & 3;
        cur = (TranslationBlock *)((long)cur & ~3);
        if (tag == 2) {
            break;
        }
        cur = cur->jmp_next[tag];
    }
    /* we are now sure now that tb jumps to 'dest' */
    dest = cur;

    /* Unlink entry (tb, n) from dest's jmp_first list. */
    link = &dest->jmp_first;
    for (;;) {
        cur = *link;
        tag = (long)cur & 3;
        cur = (TranslationBlock *)((long)cur & ~3);
        if (tag == n && cur == tb) {
            break;
        }
        link = &cur->jmp_next[tag];
    }
    *link = tb->jmp_next[n];
    tb->jmp_next[n] = NULL;

    /* suppress the jump to next tb in generated code */
    tb_reset_jump(tb, n);

    /* suppress jumps in the tb on which we could have jumped */
    tb_reset_jump_recursive(dest);
}
1340 ea041c0e bellard
1341 ea041c0e bellard
/* Reset both outgoing direct jumps of 'tb' (and, transitively, of the
   TBs they chained to). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    int i;

    for (i = 0; i < 2; i++) {
        tb_reset_jump_recursive2(tb, i);
    }
}
1346 ea041c0e bellard
1347 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1348 94df27fd Paul Brook
#if defined(CONFIG_USER_ONLY)
1349 94df27fd Paul Brook
/* Invalidate any translated code containing 'pc'.  In user mode the
   guest virtual address is used directly as the physical range. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1353 94df27fd Paul Brook
#else
1354 d720b93d bellard
/* Invalidate any translated code containing 'pc'.  System mode: the
   virtual pc is first translated to a ram address via the debug MMU
   walk, then the covering TBs are dropped. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    /* Unmapped pages are treated as unassigned I/O memory. */
    pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
1371 c27004ec bellard
#endif
1372 94df27fd Paul Brook
#endif /* TARGET_HAS_ICE */
1373 d720b93d bellard
1374 c527ee8f Paul Brook
#if defined(CONFIG_USER_ONLY)
1375 c527ee8f Paul Brook
/* User-mode emulation has no watchpoint support: nothing to remove. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}
1379 c527ee8f Paul Brook
1380 c527ee8f Paul Brook
/* Watchpoints are not implemented for user-mode emulation. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1385 c527ee8f Paul Brook
#else
1386 6658ffb8 pbrook
/* Add a watchpoint.  */
1387 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1388 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1389 6658ffb8 pbrook
{
1390 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1391 c0ce998e aliguori
    CPUWatchpoint *wp;
1392 6658ffb8 pbrook
1393 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1394 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1395 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1396 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1397 b4051334 aliguori
        return -EINVAL;
1398 b4051334 aliguori
    }
1399 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1400 a1d1bb31 aliguori
1401 a1d1bb31 aliguori
    wp->vaddr = addr;
1402 b4051334 aliguori
    wp->len_mask = len_mask;
1403 a1d1bb31 aliguori
    wp->flags = flags;
1404 a1d1bb31 aliguori
1405 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1406 c0ce998e aliguori
    if (flags & BP_GDB)
1407 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1408 c0ce998e aliguori
    else
1409 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1410 6658ffb8 pbrook
1411 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1412 a1d1bb31 aliguori
1413 a1d1bb31 aliguori
    if (watchpoint)
1414 a1d1bb31 aliguori
        *watchpoint = wp;
1415 a1d1bb31 aliguori
    return 0;
1416 6658ffb8 pbrook
}
1417 6658ffb8 pbrook
1418 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1419 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1420 a1d1bb31 aliguori
                          int flags)
1421 6658ffb8 pbrook
{
1422 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1423 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1424 6658ffb8 pbrook
1425 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1426 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1427 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1428 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1429 6658ffb8 pbrook
            return 0;
1430 6658ffb8 pbrook
        }
1431 6658ffb8 pbrook
    }
1432 a1d1bb31 aliguori
    return -ENOENT;
1433 6658ffb8 pbrook
}
1434 6658ffb8 pbrook
1435 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1436 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1437 a1d1bb31 aliguori
{
1438 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1439 7d03f82f edgar_igl
1440 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1441 a1d1bb31 aliguori
1442 a1d1bb31 aliguori
    qemu_free(watchpoint);
1443 a1d1bb31 aliguori
}
1444 a1d1bb31 aliguori
1445 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1446 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1447 a1d1bb31 aliguori
{
1448 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1449 a1d1bb31 aliguori
1450 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1451 a1d1bb31 aliguori
        if (wp->flags & mask)
1452 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1453 c0ce998e aliguori
    }
1454 7d03f82f edgar_igl
}
1455 c527ee8f Paul Brook
#endif
1456 7d03f82f edgar_igl
1457 a1d1bb31 aliguori
/* Add a breakpoint.  */
1458 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1459 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1460 4c3a88a2 bellard
{
1461 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1462 c0ce998e aliguori
    CPUBreakpoint *bp;
1463 3b46e624 ths
1464 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1465 4c3a88a2 bellard
1466 a1d1bb31 aliguori
    bp->pc = pc;
1467 a1d1bb31 aliguori
    bp->flags = flags;
1468 a1d1bb31 aliguori
1469 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1470 c0ce998e aliguori
    if (flags & BP_GDB)
1471 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1472 c0ce998e aliguori
    else
1473 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1474 3b46e624 ths
1475 d720b93d bellard
    breakpoint_invalidate(env, pc);
1476 a1d1bb31 aliguori
1477 a1d1bb31 aliguori
    if (breakpoint)
1478 a1d1bb31 aliguori
        *breakpoint = bp;
1479 4c3a88a2 bellard
    return 0;
1480 4c3a88a2 bellard
#else
1481 a1d1bb31 aliguori
    return -ENOSYS;
1482 4c3a88a2 bellard
#endif
1483 4c3a88a2 bellard
}
1484 4c3a88a2 bellard
1485 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1486 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1487 a1d1bb31 aliguori
{
1488 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1489 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1490 a1d1bb31 aliguori
1491 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1492 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1493 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1494 a1d1bb31 aliguori
            return 0;
1495 a1d1bb31 aliguori
        }
1496 7d03f82f edgar_igl
    }
1497 a1d1bb31 aliguori
    return -ENOENT;
1498 a1d1bb31 aliguori
#else
1499 a1d1bb31 aliguori
    return -ENOSYS;
1500 7d03f82f edgar_igl
#endif
1501 7d03f82f edgar_igl
}
1502 7d03f82f edgar_igl
1503 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1504 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1505 4c3a88a2 bellard
{
1506 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1507 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1508 d720b93d bellard
1509 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1510 a1d1bb31 aliguori
1511 a1d1bb31 aliguori
    qemu_free(breakpoint);
1512 a1d1bb31 aliguori
#endif
1513 a1d1bb31 aliguori
}
1514 a1d1bb31 aliguori
1515 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1516 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1517 a1d1bb31 aliguori
{
1518 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1519 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1520 a1d1bb31 aliguori
1521 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1522 a1d1bb31 aliguori
        if (bp->flags & mask)
1523 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1524 c0ce998e aliguori
    }
1525 4c3a88a2 bellard
#endif
1526 4c3a88a2 bellard
}
1527 4c3a88a2 bellard
1528 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1529 c33a346e bellard
   CPU loop after each instruction */
1530 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1531 c33a346e bellard
{
1532 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1533 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1534 c33a346e bellard
        env->singlestep_enabled = enabled;
1535 e22a25c9 aliguori
        if (kvm_enabled())
1536 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1537 e22a25c9 aliguori
        else {
1538 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1539 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1540 e22a25c9 aliguori
            tb_flush(env);
1541 e22a25c9 aliguori
        }
1542 c33a346e bellard
    }
1543 c33a346e bellard
#endif
1544 c33a346e bellard
}
1545 c33a346e bellard
1546 34865134 bellard
/* enable or disable low levels log */
1547 34865134 bellard
void cpu_set_log(int log_flags)
1548 34865134 bellard
{
1549 34865134 bellard
    loglevel = log_flags;
1550 34865134 bellard
    if (loglevel && !logfile) {
1551 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1552 34865134 bellard
        if (!logfile) {
1553 34865134 bellard
            perror(logfilename);
1554 34865134 bellard
            _exit(1);
1555 34865134 bellard
        }
1556 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1557 9fa3e853 bellard
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1558 9fa3e853 bellard
        {
1559 b55266b5 blueswir1
            static char logfile_buf[4096];
1560 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1561 9fa3e853 bellard
        }
1562 bf65f53f Filip Navara
#elif !defined(_WIN32)
1563 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1564 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1565 9fa3e853 bellard
#endif
1566 e735b91c pbrook
        log_append = 1;
1567 e735b91c pbrook
    }
1568 e735b91c pbrook
    if (!loglevel && logfile) {
1569 e735b91c pbrook
        fclose(logfile);
1570 e735b91c pbrook
        logfile = NULL;
1571 34865134 bellard
    }
1572 34865134 bellard
}
1573 34865134 bellard
1574 34865134 bellard
void cpu_set_log_filename(const char *filename)
1575 34865134 bellard
{
1576 34865134 bellard
    logfilename = strdup(filename);
1577 e735b91c pbrook
    if (logfile) {
1578 e735b91c pbrook
        fclose(logfile);
1579 e735b91c pbrook
        logfile = NULL;
1580 e735b91c pbrook
    }
1581 e735b91c pbrook
    cpu_set_log(loglevel);
1582 34865134 bellard
}
1583 c33a346e bellard
1584 3098dba0 aurel32
/* Break the TB chain the CPU is currently executing so it returns to
   the main loop as soon as possible. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    /* Serialize concurrent unlink attempts on the same TB graph. */
    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1603 3098dba0 aurel32
1604 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        /* Make the instruction counter expire immediately so the CPU
           loop notices the pending interrupt. */
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* NOTE(review): raising a *new* interrupt bit outside an I/O
           instruction would make icount execution non-deterministic,
           hence the abort. */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}
1635 ea041c0e bellard
1636 b54ad049 bellard
/* Clear the given pending-interrupt bits. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1640 b54ad049 bellard
1641 3098dba0 aurel32
/* Request the CPU loop to stop, and break any chained TBs so the
   request is noticed promptly. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1646 3098dba0 aurel32
1647 c7cd6a37 blueswir1
/* Table mapping log-mask bits to the option names accepted by
   cpu_str_to_log_mask(); terminated by a zero-mask sentinel entry. */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};
1678 f193c797 bellard
1679 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1680 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1681 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1682 f6f3fbca Michael S. Tsirkin
1683 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1684 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t size,
1685 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t phys_offset)
1686 f6f3fbca Michael S. Tsirkin
{
1687 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1688 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1689 f6f3fbca Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset);
1690 f6f3fbca Michael S. Tsirkin
    }
1691 f6f3fbca Michael S. Tsirkin
}
1692 f6f3fbca Michael S. Tsirkin
1693 f6f3fbca Michael S. Tsirkin
/* Ask every client to sync its dirty bitmap for [start, end); stop
   and report the first failure. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;

    QLIST_FOREACH(client, &memory_client_list, list) {
        int ret = client->sync_dirty_bitmap(client, start, end);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}
1704 f6f3fbca Michael S. Tsirkin
1705 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1706 f6f3fbca Michael S. Tsirkin
{
1707 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1708 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1709 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1710 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1711 f6f3fbca Michael S. Tsirkin
            return r;
1712 f6f3fbca Michael S. Tsirkin
    }
1713 f6f3fbca Michael S. Tsirkin
    return 0;
1714 f6f3fbca Michael S. Tsirkin
}
1715 f6f3fbca Michael S. Tsirkin
1716 5cd2c5b6 Richard Henderson
/* Recursively walk one subtree of the physical page radix table and
   report every assigned page to 'client'.  'level' 0 means *lp points
   at a leaf array of PhysPageDesc; higher levels point at arrays of
   child pointers. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    /* Unpopulated subtree: nothing mapped below this slot. */
    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            /* Only notify pages that have a real backing assignment. */
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}
1739 f6f3fbca Michael S. Tsirkin
1740 f6f3fbca Michael S. Tsirkin
/* Replay the entire physical memory map to 'client' by walking every
   top-level slot of the page radix table.
   Fix: the loop previously passed 'l1_phys_map + 1' on every
   iteration (the index 'i' was unused), so only a single L1 slot was
   ever visited and newly registered clients saw an incomplete map.
   Each iteration must descend into slot 'i'. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}
1748 f6f3fbca Michael S. Tsirkin
1749 f6f3fbca Michael S. Tsirkin
/* Register a memory client and immediately replay the current memory
   map so it starts with a complete picture. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1754 f6f3fbca Michael S. Tsirkin
1755 f6f3fbca Michael S. Tsirkin
/* Detach a memory client; no further notifications are delivered. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1759 f6f3fbca Michael S. Tsirkin
#endif
1760 f6f3fbca Michael S. Tsirkin
1761 f193c797 bellard
/* Return nonzero iff the NUL-terminated string s2 equals the first n
   characters of s1 (and has exactly length n). */
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != (size_t)n) {
        return 0;
    }
    return memcmp(s1, s2, n) == 0;
}
1767 3b46e624 ths
1768 f193c797 bellard
/* takes a comma separated list of log masks. Return 0 if error. */
1769 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1770 f193c797 bellard
{
1771 c7cd6a37 blueswir1
    const CPULogItem *item;
1772 f193c797 bellard
    int mask;
1773 f193c797 bellard
    const char *p, *p1;
1774 f193c797 bellard
1775 f193c797 bellard
    p = str;
1776 f193c797 bellard
    mask = 0;
1777 f193c797 bellard
    for(;;) {
1778 f193c797 bellard
        p1 = strchr(p, ',');
1779 f193c797 bellard
        if (!p1)
1780 f193c797 bellard
            p1 = p + strlen(p);
1781 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1782 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1783 8e3a9fd2 bellard
                        mask |= item->mask;
1784 8e3a9fd2 bellard
                }
1785 8e3a9fd2 bellard
        } else {
1786 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1787 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1788 f193c797 bellard
                goto found;
1789 f193c797 bellard
        }
1790 f193c797 bellard
        return 0;
1791 8e3a9fd2 bellard
        }
1792 f193c797 bellard
    found:
1793 f193c797 bellard
        mask |= item->mask;
1794 f193c797 bellard
        if (*p1 != ',')
1795 f193c797 bellard
            break;
1796 f193c797 bellard
        p = p1 + 1;
1797 f193c797 bellard
    }
1798 f193c797 bellard
    return mask;
1799 f193c797 bellard
}
1800 ea041c0e bellard
1801 7501267e bellard
/* Print a fatal error message to stderr (and the qemu log, when
   enabled), dump the CPU state, and abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by the vfprintf below; keep a copy for the log. */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        /* Make sure everything reaches the log before the process dies. */
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* Restore the default SIGABRT disposition so abort() really
           terminates the process even if the guest installed a handler. */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1840 7501267e bellard
1841 c5be9f08 ths
/* Create a copy of a CPU state (used by fork-style clones).  The new
   CPU gets its own breakpoint/watchpoint lists, cloned from 'env'.
   Fix: the list heads of *new_env* must be re-initialized after the
   memcpy — the old code called QTAILQ_INIT on 'env', which emptied the
   source CPU's lists (leaking every node) while leaving new_env's
   copied heads pointing into the source's entries, and then the
   FOREACH loops below iterated the now-empty source lists so nothing
   was actually cloned. */
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}
1874 c5be9f08 ths
1875 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1876 0124311e bellard
1877 5c751e99 edgar_igl
/* Drop tb_jmp_cache entries for any TB that might overlap the flushed
   page.  Both the page itself and the page before it are cleared, since
   a TB may straddle the page boundary. */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    target_ulong pages[2];
    int k;

    pages[0] = addr - TARGET_PAGE_SIZE;
    pages[1] = addr;

    for (k = 0; k < 2; k++) {
        unsigned int idx = tb_jmp_cache_hash_page(pages[k]);
        memset(&env->tb_jmp_cache[idx], 0,
               TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
    }
}
1891 5c751e99 edgar_igl
1892 08738984 Igor Kovalenko
/* Template for an invalid TLB entry: every address field is -1, which
   carries the TLB_INVALID_MASK bit, so no lookup can ever match it. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1898 08738984 Igor Kovalenko
1899 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1900 ee8b7021 bellard
   implemented yet) */
1901 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1902 33417e70 bellard
{
1903 33417e70 bellard
    int i;
1904 0124311e bellard
1905 9fa3e853 bellard
#if defined(DEBUG_TLB)
1906 9fa3e853 bellard
    printf("tlb_flush:\n");
1907 9fa3e853 bellard
#endif
1908 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1909 0124311e bellard
       links while we are modifying them */
1910 0124311e bellard
    env->current_tb = NULL;
1911 0124311e bellard
1912 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1913 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1914 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1915 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1916 cfde4bd9 Isaku Yamahata
        }
1917 33417e70 bellard
    }
1918 9fa3e853 bellard
1919 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1920 9fa3e853 bellard
1921 e3db7226 bellard
    tlb_flush_count++;
1922 33417e70 bellard
}
1923 33417e70 bellard
1924 274da6b2 bellard
/* Invalidate 'tlb_entry' if any of its read/write/code addresses matches
   the page 'addr' (comparison ignores in-page bits but keeps the
   TLB_INVALID flag, so already-invalid entries never match). */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    const target_ulong cmp_mask = TARGET_PAGE_MASK | TLB_INVALID_MASK;

    if (addr == (tlb_entry->addr_read & cmp_mask) ||
        addr == (tlb_entry->addr_write & cmp_mask) ||
        addr == (tlb_entry->addr_code & cmp_mask)) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
1935 61382a50 bellard
1936 2e12669a bellard
/* Invalidate the TLB entries for virtual page 'addr' in all MMU modes
   and discard the related TB jump-cache entries. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int slot;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    slot = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][slot], addr);
    }

    tlb_flush_jmp_cache(env, addr);
}
1955 9fa3e853 bellard
1956 9fa3e853 bellard
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* Clearing CODE_DIRTY_FLAG routes subsequent writes to this page
       through the not-dirty slow path (see tlb_reset_dirty_range), so
       self-modifying code can be caught. */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
1964 9fa3e853 bellard
1965 9fa3e853 bellard
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* Re-set the page's code-dirty bit; 'env' and 'vaddr' are unused in
       this implementation. */
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
1972 1ccde1cb bellard
1973 5fafdf24 ths
/* If 'tlb_entry' is a normal-RAM write mapping whose host address falls
   inside [start, start+length), flag it with TLB_NOTDIRTY so the next
   store takes the slow path and can mark the page dirty again. */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long host_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    host_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK)
        + tlb_entry->addend;
    if ((host_addr - start) < length) {
        tlb_entry->addr_write =
            (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
    }
}
1984 1ccde1cb bellard
1985 5579c7f3 pbrook
/* Clear the given 'dirty_flags' bits for every page in [start, end) and
   push the matching TLB write entries back onto the slow path so the
   bits get set again on the next store.
   Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* Clear the requested bits in the dirty bitmap for each page. */
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    /* Walk every TLB of every CPU and re-flag entries in the range. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2025 1ccde1cb bellard
2026 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
2027 74576198 aliguori
{
2028 f6f3fbca Michael S. Tsirkin
    int ret = 0;
2029 74576198 aliguori
    in_migration = enable;
2030 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
2031 f6f3fbca Michael S. Tsirkin
    return ret;
2032 74576198 aliguori
}
2033 74576198 aliguori
2034 74576198 aliguori
/* Report whether dirty-memory tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2038 74576198 aliguori
2039 c227f099 Anthony Liguori
/* Ask registered clients to synchronise their dirty bitmap for the
   physical range [start_addr, end_addr].  Returns the notifier result. */
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    return cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
}
2047 2bec46dc aliguori
2048 3a7d929e bellard
/* If 'tlb_entry' is a RAM write mapping whose backing page is no longer
   marked dirty, set TLB_NOTDIRTY so the next store is trapped. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *host_ptr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        return;
    }
    host_ptr = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
        + tlb_entry->addend);
    ram_addr = qemu_ram_addr_from_host(host_ptr);
    if (!cpu_physical_memory_is_dirty(ram_addr)) {
        tlb_entry->addr_write |= TLB_NOTDIRTY;
    }
}
2062 3a7d929e bellard
2063 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2064 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2065 3a7d929e bellard
{
2066 3a7d929e bellard
    int i;
2067 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2068 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2069 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2070 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2071 cfde4bd9 Isaku Yamahata
    }
2072 3a7d929e bellard
}
2073 3a7d929e bellard
2074 0f459d16 pbrook
/* If the entry maps exactly 'vaddr' and only the TLB_NOTDIRTY flag is
   set, clear the flag so fast-path writes are allowed again. */
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}
2079 1ccde1cb bellard
2080 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2081 0f459d16 pbrook
   so that it is no longer dirty */
2082 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2083 1ccde1cb bellard
{
2084 1ccde1cb bellard
    int i;
2085 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2086 1ccde1cb bellard
2087 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2088 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2089 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2090 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2091 9fa3e853 bellard
}
2092 9fa3e853 bellard
2093 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    /* Look up the physical page descriptor; unmapped physical pages
       resolve to IO_MEM_UNASSIGNED. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    /* Fill the TLB slot for this page in the selected MMU mode; the
       stored addend/iotlb values are pre-biased by -vaddr so the fast
       path can add the guest address directly. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap writes to track dirtying / SMC. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
2195 9fa3e853 bellard
2196 0124311e bellard
#else
2197 0124311e bellard
2198 ee8b7021 bellard
/* CONFIG_USER_ONLY: there is no softmmu TLB, so flushing is a no-op. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2201 0124311e bellard
2202 2e12669a bellard
/* CONFIG_USER_ONLY: there is no softmmu TLB, so nothing to flush. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2205 0124311e bellard
2206 edf8e2af Mika Westerberg
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator threaded through the page-table walk: 'start' and 'prot'
   describe the currently open region; start == -1ul means no region is
   open. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};
2218 5cd2c5b6 Richard Henderson
2219 5cd2c5b6 Richard Henderson
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2220 b480d9b7 Paul Brook
                                   abi_ulong end, int new_prot)
2221 5cd2c5b6 Richard Henderson
{
2222 5cd2c5b6 Richard Henderson
    if (data->start != -1ul) {
2223 5cd2c5b6 Richard Henderson
        int rc = data->fn(data->priv, data->start, end, data->prot);
2224 5cd2c5b6 Richard Henderson
        if (rc != 0) {
2225 5cd2c5b6 Richard Henderson
            return rc;
2226 5cd2c5b6 Richard Henderson
        }
2227 5cd2c5b6 Richard Henderson
    }
2228 5cd2c5b6 Richard Henderson
2229 5cd2c5b6 Richard Henderson
    data->start = (new_prot ? end : -1ul);
2230 5cd2c5b6 Richard Henderson
    data->prot = new_prot;
2231 5cd2c5b6 Richard Henderson
2232 5cd2c5b6 Richard Henderson
    return 0;
2233 5cd2c5b6 Richard Henderson
}
2234 5cd2c5b6 Richard Henderson
2235 5cd2c5b6 Richard Henderson
/* Recursive helper: walk one level of the radix page table.  'base' is
   the guest address covered by *lp; at level 0, *lp is an array of
   PageDesc, otherwise an array of pointers to the next level down.
   Returns the first nonzero callback result, or 0. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        /* Hole in the table: terminate any open region at 'base'. */
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                /* Protection changed: flush the previous region and
                   open a new one starting at 'pa'. */
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2272 5cd2c5b6 Richard Henderson
2273 5cd2c5b6 Richard Henderson
/* Walk the guest page table and invoke 'fn' once per contiguous run of
   pages with identical protection.  Returns the first nonzero value
   returned by 'fn', or 0 after the whole address space is walked. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;          /* no region open yet */
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2293 edf8e2af Mika Westerberg
2294 b480d9b7 Paul Brook
/* walk_memory_regions() callback: print one "start-end size rwx" line
   for a region to the FILE* carried in 'priv'.  Always returns 0 so the
   walk continues. */
static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;
    int r = (prot & PAGE_READ) ? 'r' : '-';
    int w = (prot & PAGE_WRITE) ? 'w' : '-';
    int x = (prot & PAGE_EXEC) ? 'x' : '-';

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start, r, w, x);

    return (0);
}
2308 edf8e2af Mika Westerberg
2309 edf8e2af Mika Westerberg
/* dump memory mappings */
void page_dump(FILE *f)
{
    /* Header line, then one dump_region() line per region. */
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2316 33417e70 bellard
2317 53a5960a pbrook
/* Return the PAGE_* flags of the page containing 'address', or 0 when
   the page has no descriptor. */
int page_get_flags(target_ulong address)
{
    PageDesc *desc = page_find(address >> TARGET_PAGE_BITS);

    return desc ? desc->flags : 0;
}
2326 9fa3e853 bellard
2327 376a7909 Richard Henderson
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Remember the page was originally writable so write access can
           be restored after SMC protection (see page_unprotect). */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2364 33417e70 bellard
2365 3d97b40b ths
/* Check that the guest range [start, start+len) is mapped with at least
   the protections requested in 'flags' (PAGE_READ / PAGE_WRITE).  Write
   checks also transparently unprotect pages that were made read-only
   because they contain translated code.
   Returns 0 on success, -1 on failure.
   NOTE(review): len == 0 with start > 0 trips the wrap-around test and
   returns -1 — confirm no caller passes a zero length.
   Bug fix: the old code returned 0 from inside the loop after write-
   checking only the FIRST page of the range; every page must be checked
   (and unprotected) before success can be reported. */
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p) {
            return -1;
        }
        if (!(p->flags & PAGE_VALID)) {
            return -1;
        }

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) {
            return -1;
        }
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG)) {
                return -1;
            }
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL)) {
                    return -1;
                }
            }
            /* Do NOT return success here: continue so every page in the
               range is validated, not just the first one. */
        }
    }
    return 0;
}
2411 3d97b40b ths
2412 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    /* OR together the flags of every target page that shares this host
       page (host pages may be larger than target pages). */
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
2460 9fa3e853 bellard
2461 6a00d601 bellard
/* User-mode emulation has no TLB, so this is an empty stub kept so the
   shared code paths can call tlb_set_dirty() unconditionally.
   (This definition sits in the CONFIG_USER_ONLY section.)  */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
2465 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2466 9fa3e853 bellard
2467 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2468 8da3ff18 pbrook
2469 c04b2b78 Paul Brook
/* Byte offset of 'addr' within its target page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
/* Per-page dispatch state for pages that contain more than one memory
   region at sub-page granularity.  Handler tables are indexed first by
   the byte offset within the page; the [4] axis appears to be the
   access-size index and the [2] axis read-vs-write -- TODO confirm
   against subpage_register()/subpage_init(), which are defined later.  */
typedef struct subpage_t {
    target_phys_addr_t base;   /* guest-physical base address of the page */
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
2477 c04b2b78 Paul Brook
2478 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2479 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2480 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2481 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2482 db7b5426 blueswir1
/* Compute the sub-page offset range [start_addr2, end_addr2] (both are
   offsets within the page containing 'addr') that a registration of
   [start_addr, start_addr + orig_size) covers, and set need_subpage to 1
   when the registration does NOT cover the whole page at either edge.
   NOTE: 'orig_size' is read from the expansion site's scope, so this
   macro may only be used where that variable exists.  */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2501 db7b5426 blueswir1
2502 8f2498f9 Michael S. Tsirkin
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    /* orig_size looks unused but is read by the CHECK_SUBPAGE macro. */
    ram_addr_t orig_size = size;
    void *subpage;

    /* Let interested clients (e.g. KVM) know about the mapping change. */
    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    /* Round the size up to whole target pages. */
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage
               if the new registration only partially covers it.  */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    /* Convert the existing full-page mapping into a
                       subpage container first.  */
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Already a subpage: recover its container.  */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(D) pages the stored offset advances with
                   the address; pure I/O pages keep a fixed handler id. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Fresh page: allocate its descriptor.  */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2590 33417e70 bellard
2591 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2592 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2593 ba863458 bellard
{
2594 ba863458 bellard
    PhysPageDesc *p;
2595 ba863458 bellard
2596 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2597 ba863458 bellard
    if (!p)
2598 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2599 ba863458 bellard
    return p->phys_offset;
2600 ba863458 bellard
}
2601 ba863458 bellard
2602 c227f099 Anthony Liguori
/* Ask the accelerator to coalesce MMIO writes for [addr, addr+size);
   a no-op unless KVM is active.  */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_coalesce_mmio_region(addr, size);
}
2607 f65ed4c1 aliguori
2608 c227f099 Anthony Liguori
/* Undo qemu_register_coalesced_mmio() for [addr, addr+size);
   a no-op unless KVM is active.  */
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_uncoalesce_mmio_region(addr, size);
}
2613 f65ed4c1 aliguori
2614 62a2744c Sheng Yang
/* Flush any MMIO writes buffered by KVM's coalescing machinery;
   a no-op unless KVM is active.  */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (!kvm_enabled()) {
        return;
    }
    kvm_flush_coalesced_mmio_buffer();
}
2619 62a2744c Sheng Yang
2620 c902760f Marcelo Tosatti
#if defined(__linux__) && !defined(TARGET_S390X)
2621 c902760f Marcelo Tosatti
2622 c902760f Marcelo Tosatti
#include <sys/vfs.h>
2623 c902760f Marcelo Tosatti
2624 c902760f Marcelo Tosatti
#define HUGETLBFS_MAGIC       0x958458f6
2625 c902760f Marcelo Tosatti
2626 c902760f Marcelo Tosatti
static long gethugepagesize(const char *path)
2627 c902760f Marcelo Tosatti
{
2628 c902760f Marcelo Tosatti
    struct statfs fs;
2629 c902760f Marcelo Tosatti
    int ret;
2630 c902760f Marcelo Tosatti
2631 c902760f Marcelo Tosatti
    do {
2632 c902760f Marcelo Tosatti
            ret = statfs(path, &fs);
2633 c902760f Marcelo Tosatti
    } while (ret != 0 && errno == EINTR);
2634 c902760f Marcelo Tosatti
2635 c902760f Marcelo Tosatti
    if (ret != 0) {
2636 c902760f Marcelo Tosatti
            perror("statfs");
2637 c902760f Marcelo Tosatti
            return 0;
2638 c902760f Marcelo Tosatti
    }
2639 c902760f Marcelo Tosatti
2640 c902760f Marcelo Tosatti
    if (fs.f_type != HUGETLBFS_MAGIC)
2641 c902760f Marcelo Tosatti
            fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2642 c902760f Marcelo Tosatti
2643 c902760f Marcelo Tosatti
    return fs.f_bsize;
2644 c902760f Marcelo Tosatti
}
2645 c902760f Marcelo Tosatti
2646 c902760f Marcelo Tosatti
/* Allocate 'memory' bytes of guest RAM backed by an unlinked temporary
   file created under 'path' (intended to be a hugetlbfs mount; see the
   -mem-path option).  Returns the mapped area, or NULL on any failure
   so the caller can fall back or abort.
   NOTE(review): on success 'fd' is never closed.  The mapping stays
   valid regardless, but the descriptor itself is leaked -- verify
   whether anything relies on it staying open.  */
static void *file_ram_alloc(ram_addr_t memory, const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Refuse requests smaller than a single huge page. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("mkstemp");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the backing file lives on only through the
       descriptor and the mapping, and disappears automatically.  */
    unlink(filename);
    free(filename);

    /* Round up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    return area;
}
2711 c902760f Marcelo Tosatti
#endif
2712 c902760f Marcelo Tosatti
2713 c227f099 Anthony Liguori
/* Allocate 'size' bytes of guest RAM (rounded up to the target page
   size), prepend it to the global RAMBlock list and grow the dirty
   bitmap to cover it.  Returns the ram_addr_t offset of the new block.
   Exits the process outright if a -mem-path allocation fails or is
   unsupported on this host.  */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
        /* Back the RAM with a file (hugetlbfs) as requested by -mem-path. */
        new_block->host = file_ram_alloc(size, mem_path);
        if (!new_block->host)
            exit(1);
#else
        fprintf(stderr, "-mem-path option unsupported\n");
        exit(1);
#endif
    } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
        /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
        /* NOTE(review): this mmap result is not checked against
           MAP_FAILED before use -- confirm intentional.  */
        new_block->host = mmap((void*)0x1000000, size,
                                PROT_EXEC|PROT_READ|PROT_WRITE,
                                MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
        new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
        /* Allow the kernel (KSM) to merge identical pages. */
        madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    }
    new_block->offset = last_ram_offset;
    new_block->length = size;

    /* Prepend to the global list searched by qemu_get_ram_ptr(). */
    new_block->next = ram_blocks;
    ram_blocks = new_block;

    /* One dirty byte per target page; new pages start fully dirty. */
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2760 e9a1ab19 bellard
2761 c227f099 Anthony Liguori
/* Release RAM previously allocated with qemu_ram_alloc().  Currently a
   stub: the block and its host memory are never reclaimed.  */
void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}
2765 e9a1ab19 bellard
2766 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2767 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2768 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2769 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2770 5579c7f3 pbrook

2771 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2772 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2773 5579c7f3 pbrook
 */
2774 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
2775 dc828ca1 pbrook
{
2776 94a6b54f pbrook
    RAMBlock *prev;
2777 94a6b54f pbrook
    RAMBlock **prevp;
2778 94a6b54f pbrook
    RAMBlock *block;
2779 94a6b54f pbrook
2780 94a6b54f pbrook
    prev = NULL;
2781 94a6b54f pbrook
    prevp = &ram_blocks;
2782 94a6b54f pbrook
    block = ram_blocks;
2783 94a6b54f pbrook
    while (block && (block->offset > addr
2784 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2785 94a6b54f pbrook
        if (prev)
2786 94a6b54f pbrook
          prevp = &prev->next;
2787 94a6b54f pbrook
        prev = block;
2788 94a6b54f pbrook
        block = block->next;
2789 94a6b54f pbrook
    }
2790 94a6b54f pbrook
    if (!block) {
2791 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2792 94a6b54f pbrook
        abort();
2793 94a6b54f pbrook
    }
2794 94a6b54f pbrook
    /* Move this entry to to start of the list.  */
2795 94a6b54f pbrook
    if (prev) {
2796 94a6b54f pbrook
        prev->next = block->next;
2797 94a6b54f pbrook
        block->next = *prevp;
2798 94a6b54f pbrook
        *prevp = block;
2799 94a6b54f pbrook
    }
2800 94a6b54f pbrook
    return block->host + (addr - block->offset);
2801 dc828ca1 pbrook
}
2802 dc828ca1 pbrook
2803 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2804 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2805 c227f099 Anthony Liguori
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2806 5579c7f3 pbrook
{
2807 94a6b54f pbrook
    RAMBlock *prev;
2808 94a6b54f pbrook
    RAMBlock *block;
2809 94a6b54f pbrook
    uint8_t *host = ptr;
2810 94a6b54f pbrook
2811 94a6b54f pbrook
    prev = NULL;
2812 94a6b54f pbrook
    block = ram_blocks;
2813 94a6b54f pbrook
    while (block && (block->host > host
2814 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2815 94a6b54f pbrook
        prev = block;
2816 94a6b54f pbrook
        block = block->next;
2817 94a6b54f pbrook
    }
2818 94a6b54f pbrook
    if (!block) {
2819 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2820 94a6b54f pbrook
        abort();
2821 94a6b54f pbrook
    }
2822 94a6b54f pbrook
    return block->offset + (host - block->host);
2823 5579c7f3 pbrook
}
2824 5579c7f3 pbrook
2825 c227f099 Anthony Liguori
/* Byte read from a guest-physical address with nothing mapped.  Logs
   under DEBUG_UNASSIGNED; on SPARC/MicroBlaze the access is reported
   via do_unassigned_access(), otherwise it reads as zero.  */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
2835 e18231a3 blueswir1
2836 c227f099 Anthony Liguori
/* 16-bit variant of unassigned_mem_readb(); reads as zero.  */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
2846 e18231a3 blueswir1
2847 c227f099 Anthony Liguori
/* 32-bit variant of unassigned_mem_readb(); reads as zero.  */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
2857 33417e70 bellard
2858 c227f099 Anthony Liguori
/* Byte write to a guest-physical address with nothing mapped.  Logs
   under DEBUG_UNASSIGNED; on SPARC/MicroBlaze the access is reported
   via do_unassigned_access(), otherwise the value is discarded.  */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
2867 e18231a3 blueswir1
2868 c227f099 Anthony Liguori
/* 16-bit variant of unassigned_mem_writeb(); the value is discarded.  */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
2877 e18231a3 blueswir1
2878 c227f099 Anthony Liguori
/* 32-bit variant of unassigned_mem_writeb(); the value is discarded.  */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
2887 33417e70 bellard
2888 d60efc6b Blue Swirl
/* Read dispatch table for unassigned memory:
   [0] = byte, [1] = 16-bit, [2] = 32-bit handler.  */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
2893 33417e70 bellard
2894 d60efc6b Blue Swirl
/* Write dispatch table for unassigned memory:
   [0] = byte, [1] = 16-bit, [2] = 32-bit handler.  */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
2899 33417e70 bellard
2900 c227f099 Anthony Liguori
/* Byte write to a RAM page that contains translated code: invalidate
   any TBs overlapping the byte, perform the store, then set the page's
   dirty bits.  Once all dirty bits are set the slow write path is no
   longer needed and the TLB entry is switched back to plain RAM.  */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* Flush translated code overlapping this byte, then re-read the
           flags which the invalidation may have changed.  */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2919 9fa3e853 bellard
2920 c227f099 Anthony Liguori
/* 16-bit variant of notdirty_mem_writeb(); see that function for the
   invalidate / store / mark-dirty sequence.  */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2939 9fa3e853 bellard
2940 c227f099 Anthony Liguori
/* 32-bit variant of notdirty_mem_writeb(); see that function for the
   invalidate / store / mark-dirty sequence.  */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
2959 9fa3e853 bellard
2960 d60efc6b Blue Swirl
/* Placeholder read table whose slots must never be invoked; paired with
   handlers where only the write path is meaningful.  */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
2965 9fa3e853 bellard
2966 d60efc6b Blue Swirl
/* Write dispatch table for the not-dirty (code-containing) RAM path:
   [0] = byte, [1] = 16-bit, [2] = 32-bit handler.  */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
2971 1ccde1cb bellard
2972 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.
   'offset' is the byte offset of the access within its page, 'len_mask'
   masks off the low bits according to the access size, and 'flags' is
   the BP_MEM_* direction being checked.  On a hit this does not return
   normally: it longjmps back via cpu_resume_from_signal().  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    /* Reconstruct the guest virtual address of the access. */
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* Rewind CPU state to the faulting instruction and drop
                   the TB so execution can be restarted from there.  */
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* Retranslate a single-instruction TB, then re-enter
                       it; the re-entry path above raises the interrupt. */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3016 0f459d16 pbrook
3017 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3018 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
3019 6658ffb8 pbrook
   phys routines.  */
3020 c227f099 Anthony Liguori
/* Byte read on a watched page: raise any matching read watchpoint,
   then forward to the normal physical-memory accessor. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3025 6658ffb8 pbrook
3026 c227f099 Anthony Liguori
/* 16-bit read on a watched page: check read watchpoints (mask ~0x1
   for 2-byte alignment), then forward to lduw_phys(). */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3031 6658ffb8 pbrook
3032 c227f099 Anthony Liguori
/* 32-bit read on a watched page: check read watchpoints (mask ~0x3
   for 4-byte alignment), then forward to ldl_phys(). */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3037 6658ffb8 pbrook
3038 c227f099 Anthony Liguori
/* Byte write on a watched page: raise any matching write watchpoint,
   then forward to stb_phys(). */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3044 6658ffb8 pbrook
3045 c227f099 Anthony Liguori
/* 16-bit write on a watched page: check write watchpoints, then
   forward to stw_phys(). */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3051 6658ffb8 pbrook
3052 c227f099 Anthony Liguori
/* 32-bit write on a watched page: check write watchpoints, then
   forward to stl_phys(). */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3058 6658ffb8 pbrook
3059 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
3060 6658ffb8 pbrook
    watch_mem_readb,
3061 6658ffb8 pbrook
    watch_mem_readw,
3062 6658ffb8 pbrook
    watch_mem_readl,
3063 6658ffb8 pbrook
};
3064 6658ffb8 pbrook
3065 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3066 6658ffb8 pbrook
    watch_mem_writeb,
3067 6658ffb8 pbrook
    watch_mem_writew,
3068 6658ffb8 pbrook
    watch_mem_writel,
3069 6658ffb8 pbrook
};
3070 6658ffb8 pbrook
3071 c227f099 Anthony Liguori
/* Dispatch a read of width 2^len bytes (len: 0 = byte, 1 = word, 2 = long)
   to the handler registered for this subpage slot, adjusting the address
   by the slot's region offset. */
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}
3087 db7b5426 blueswir1
3088 c227f099 Anthony Liguori
/* Dispatch a write of width 2^len bytes (len: 0 = byte, 1 = word, 2 = long)
   to the handler registered for this subpage slot, adjusting the address
   by the slot's region offset. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}
3102 db7b5426 blueswir1
3103 c227f099 Anthony Liguori
/* Byte read entry point for a subpage region. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}
3111 db7b5426 blueswir1
3112 c227f099 Anthony Liguori
/* Byte write entry point for a subpage region. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}
3120 db7b5426 blueswir1
3121 c227f099 Anthony Liguori
/* 16-bit read entry point for a subpage region. */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}
3129 db7b5426 blueswir1
3130 c227f099 Anthony Liguori
/* 16-bit write entry point for a subpage region. */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}
3138 db7b5426 blueswir1
3139 c227f099 Anthony Liguori
/* 32-bit read entry point for a subpage region. */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}
3147 db7b5426 blueswir1
3148 db7b5426 blueswir1
static void subpage_writel (void *opaque,
3149 c227f099 Anthony Liguori
                         target_phys_addr_t addr, uint32_t value)
3150 db7b5426 blueswir1
{
3151 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3152 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
3153 db7b5426 blueswir1
#endif
3154 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
3155 db7b5426 blueswir1
}
3156 db7b5426 blueswir1
3157 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
3158 db7b5426 blueswir1
    &subpage_readb,
3159 db7b5426 blueswir1
    &subpage_readw,
3160 db7b5426 blueswir1
    &subpage_readl,
3161 db7b5426 blueswir1
};
3162 db7b5426 blueswir1
3163 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
3164 db7b5426 blueswir1
    &subpage_writeb,
3165 db7b5426 blueswir1
    &subpage_writew,
3166 db7b5426 blueswir1
    &subpage_writel,
3167 db7b5426 blueswir1
};
3168 db7b5426 blueswir1
3169 c227f099 Anthony Liguori
/* Register the I/O handlers of 'memory' for the byte range [start, end]
   (offsets within one target page) in the subpage 'mmio'.  Every access
   width (0..3) that the underlying region implements is wired up, along
   with its opaque pointer and 'region_offset'.  Returns 0 on success,
   -1 if start/end fall outside the page. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}
3201 db7b5426 blueswir1
3202 c227f099 Anthony Liguori
/* Allocate and register a subpage container for the page at 'base'.
   The whole page is initially backed by 'orig_memory'/'region_offset';
   *phys receives the new io index tagged with IO_MEM_SUBPAGE so the
   caller can install it in the physical page table.  Returns the new
   subpage_t (ownership stays with the io-memory machinery). */
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}
3222 db7b5426 blueswir1
3223 88715657 aliguori
static int get_free_io_mem_idx(void)
3224 88715657 aliguori
{
3225 88715657 aliguori
    int i;
3226 88715657 aliguori
3227 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3228 88715657 aliguori
        if (!io_mem_used[i]) {
3229 88715657 aliguori
            io_mem_used[i] = 1;
3230 88715657 aliguori
            return i;
3231 88715657 aliguori
        }
3232 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3233 88715657 aliguori
    return -1;
3234 88715657 aliguori
}
3235 88715657 aliguori
3236 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3237 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3238 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3239 3ee89922 blueswir1
   If io_index is non zero, the corresponding io zone is
3240 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3241 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3242 4254fab8 blueswir1
   returned if error. */
3243 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3244 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3245 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3246 1eed09cb Avi Kivity
                                        void *opaque)
3247 33417e70 bellard
{
3248 4254fab8 blueswir1
    int i, subwidth = 0;
3249 33417e70 bellard
3250 33417e70 bellard
    if (io_index <= 0) {
3251 88715657 aliguori
        io_index = get_free_io_mem_idx();
3252 88715657 aliguori
        if (io_index == -1)
3253 88715657 aliguori
            return io_index;
3254 33417e70 bellard
    } else {
3255 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3256 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3257 33417e70 bellard
            return -1;
3258 33417e70 bellard
    }
3259 b5ff1b31 bellard
3260 33417e70 bellard
    for(i = 0;i < 3; i++) {
3261 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
3262 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
3263 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
3264 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
3265 33417e70 bellard
    }
3266 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3267 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
3268 33417e70 bellard
}
3269 61382a50 bellard
3270 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3271 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3272 1eed09cb Avi Kivity
                           void *opaque)
3273 1eed09cb Avi Kivity
{
3274 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3275 1eed09cb Avi Kivity
}
3276 1eed09cb Avi Kivity
3277 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3278 88715657 aliguori
{
3279 88715657 aliguori
    int i;
3280 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3281 88715657 aliguori
3282 88715657 aliguori
    for (i=0;i < 3; i++) {
3283 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3284 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3285 88715657 aliguori
    }
3286 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3287 88715657 aliguori
    io_mem_used[io_index] = 0;
3288 88715657 aliguori
}
3289 88715657 aliguori
3290 e9179ce1 Avi Kivity
static void io_mem_init(void)
3291 e9179ce1 Avi Kivity
{
3292 e9179ce1 Avi Kivity
    int i;
3293 e9179ce1 Avi Kivity
3294 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3295 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3296 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3297 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3298 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3299 e9179ce1 Avi Kivity
3300 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3301 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
3302 e9179ce1 Avi Kivity
}
3303 e9179ce1 Avi Kivity
3304 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3305 e2eef170 pbrook
3306 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3307 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3308 a68fe89c Paul Brook
/* Debug accessor for user-mode emulation: copy 'len' bytes between
   'buf' and guest virtual memory at 'addr', page by page, honouring
   the page protection flags.  Returns 0 on success, -1 if any page is
   invalid or lacks the required read/write permission. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
3346 8df1cd07 bellard
3347 13eb76e0 bellard
#else
3348 c227f099 Anthony Liguori
/* Copy 'len' bytes between 'buf' and guest physical memory at 'addr'.
   RAM pages are accessed directly (with dirty tracking and TB
   invalidation on writes); MMIO pages are dispatched through the
   io-memory handler tables using the widest naturally-aligned access
   (4, 2 or 1 bytes) that fits the remaining length. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* clamp the chunk to the end of the current page */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3444 8df1cd07 bellard
3445 d0ecd2aa bellard
/* used for ROM loading : can write in RAM and ROM */
3446 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3447 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3448 d0ecd2aa bellard
{
3449 d0ecd2aa bellard
    int l;
3450 d0ecd2aa bellard
    uint8_t *ptr;
3451 c227f099 Anthony Liguori
    target_phys_addr_t page;
3452 d0ecd2aa bellard
    unsigned long pd;
3453 d0ecd2aa bellard
    PhysPageDesc *p;
3454 3b46e624 ths
3455 d0ecd2aa bellard
    while (len > 0) {
3456 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3457 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3458 d0ecd2aa bellard
        if (l > len)
3459 d0ecd2aa bellard
            l = len;
3460 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3461 d0ecd2aa bellard
        if (!p) {
3462 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3463 d0ecd2aa bellard
        } else {
3464 d0ecd2aa bellard
            pd = p->phys_offset;
3465 d0ecd2aa bellard
        }
3466 3b46e624 ths
3467 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3468 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3469 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3470 d0ecd2aa bellard
            /* do nothing */
3471 d0ecd2aa bellard
        } else {
3472 d0ecd2aa bellard
            unsigned long addr1;
3473 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3474 d0ecd2aa bellard
            /* ROM/RAM case */
3475 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3476 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3477 d0ecd2aa bellard
        }
3478 d0ecd2aa bellard
        len -= l;
3479 d0ecd2aa bellard
        buf += l;
3480 d0ecd2aa bellard
        addr += l;
3481 d0ecd2aa bellard
    }
3482 d0ecd2aa bellard
}
3483 d0ecd2aa bellard
3484 6d16c2f8 aliguori
typedef struct {
3485 6d16c2f8 aliguori
    void *buffer;
3486 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3487 c227f099 Anthony Liguori
    target_phys_addr_t len;
3488 6d16c2f8 aliguori
} BounceBuffer;
3489 6d16c2f8 aliguori
3490 6d16c2f8 aliguori
static BounceBuffer bounce;
3491 6d16c2f8 aliguori
3492 ba223c29 aliguori
typedef struct MapClient {
3493 ba223c29 aliguori
    void *opaque;
3494 ba223c29 aliguori
    void (*callback)(void *opaque);
3495 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3496 ba223c29 aliguori
} MapClient;
3497 ba223c29 aliguori
3498 72cf2d4f Blue Swirl
/* List of clients registered via cpu_register_map_client(). */
static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
3500 ba223c29 aliguori
3501 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3502 ba223c29 aliguori
{
3503 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3504 ba223c29 aliguori
3505 ba223c29 aliguori
    client->opaque = opaque;
3506 ba223c29 aliguori
    client->callback = callback;
3507 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3508 ba223c29 aliguori
    return client;
3509 ba223c29 aliguori
}
3510 ba223c29 aliguori
3511 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3512 ba223c29 aliguori
{
3513 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3514 ba223c29 aliguori
3515 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3516 34d5e948 Isaku Yamahata
    qemu_free(client);
3517 ba223c29 aliguori
}
3518 ba223c29 aliguori
3519 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3520 ba223c29 aliguori
{
3521 ba223c29 aliguori
    MapClient *client;
3522 ba223c29 aliguori
3523 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3524 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3525 ba223c29 aliguori
        client->callback(client->opaque);
3526 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3527 ba223c29 aliguori
    }
3528 ba223c29 aliguori
}
3529 ba223c29 aliguori
3530 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3531 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3532 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3533 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3534 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3535 ba223c29 aliguori
 * likely to succeed.
3536 6d16c2f8 aliguori
 */
3537 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3538 c227f099 Anthony Liguori
                              target_phys_addr_t *plen,
3539 6d16c2f8 aliguori
                              int is_write)
3540 6d16c2f8 aliguori
{
3541 c227f099 Anthony Liguori
    target_phys_addr_t len = *plen;
3542 c227f099 Anthony Liguori
    target_phys_addr_t done = 0;
3543 6d16c2f8 aliguori
    int l;
3544 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3545 6d16c2f8 aliguori
    uint8_t *ptr;
3546 c227f099 Anthony Liguori
    target_phys_addr_t page;
3547 6d16c2f8 aliguori
    unsigned long pd;
3548 6d16c2f8 aliguori
    PhysPageDesc *p;
3549 6d16c2f8 aliguori
    unsigned long addr1;
3550 6d16c2f8 aliguori
3551 6d16c2f8 aliguori
    while (len > 0) {
3552 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3553 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3554 6d16c2f8 aliguori
        if (l > len)
3555 6d16c2f8 aliguori
            l = len;
3556 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3557 6d16c2f8 aliguori
        if (!p) {
3558 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3559 6d16c2f8 aliguori
        } else {
3560 6d16c2f8 aliguori
            pd = p->phys_offset;
3561 6d16c2f8 aliguori
        }
3562 6d16c2f8 aliguori
3563 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3564 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3565 6d16c2f8 aliguori
                break;
3566 6d16c2f8 aliguori
            }
3567 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3568 6d16c2f8 aliguori
            bounce.addr = addr;
3569 6d16c2f8 aliguori
            bounce.len = l;
3570 6d16c2f8 aliguori
            if (!is_write) {
3571 6d16c2f8 aliguori
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3572 6d16c2f8 aliguori
            }
3573 6d16c2f8 aliguori
            ptr = bounce.buffer;
3574 6d16c2f8 aliguori
        } else {
3575 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3576 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3577 6d16c2f8 aliguori
        }
3578 6d16c2f8 aliguori
        if (!done) {
3579 6d16c2f8 aliguori
            ret = ptr;
3580 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3581 6d16c2f8 aliguori
            break;
3582 6d16c2f8 aliguori
        }
3583 6d16c2f8 aliguori
3584 6d16c2f8 aliguori
        len -= l;
3585 6d16c2f8 aliguori
        addr += l;
3586 6d16c2f8 aliguori
        done += l;
3587 6d16c2f8 aliguori
    }
3588 6d16c2f8 aliguori
    *plen = done;
3589 6d16c2f8 aliguori
    return ret;
3590 6d16c2f8 aliguori
}
3591 6d16c2f8 aliguori
3592 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3593 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3594 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3595 6d16c2f8 aliguori
 */
3596 c227f099 Anthony Liguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3597 c227f099 Anthony Liguori
                               int is_write, target_phys_addr_t access_len)
3598 6d16c2f8 aliguori
{
3599 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3600 6d16c2f8 aliguori
        if (is_write) {
3601 c227f099 Anthony Liguori
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3602 6d16c2f8 aliguori
            while (access_len) {
3603 6d16c2f8 aliguori
                unsigned l;
3604 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3605 6d16c2f8 aliguori
                if (l > access_len)
3606 6d16c2f8 aliguori
                    l = access_len;
3607 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3608 6d16c2f8 aliguori
                    /* invalidate code */
3609 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3610 6d16c2f8 aliguori
                    /* set dirty bit */
3611 6d16c2f8 aliguori
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3612 6d16c2f8 aliguori
                        (0xff & ~CODE_DIRTY_FLAG);
3613 6d16c2f8 aliguori
                }
3614 6d16c2f8 aliguori
                addr1 += l;
3615 6d16c2f8 aliguori
                access_len -= l;
3616 6d16c2f8 aliguori
            }
3617 6d16c2f8 aliguori
        }
3618 6d16c2f8 aliguori
        return;
3619 6d16c2f8 aliguori
    }
3620 6d16c2f8 aliguori
    if (is_write) {
3621 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3622 6d16c2f8 aliguori
    }
3623 f8a83245 Herve Poussineau
    qemu_vfree(bounce.buffer);
3624 6d16c2f8 aliguori
    bounce.buffer = NULL;
3625 ba223c29 aliguori
    cpu_notify_map_clients();
3626 6d16c2f8 aliguori
}
3627 d0ecd2aa bellard
3628 8df1cd07 bellard
/* warning: addr must be aligned */
3629 c227f099 Anthony Liguori
uint32_t ldl_phys(target_phys_addr_t addr)
3630 8df1cd07 bellard
{
3631 8df1cd07 bellard
    int io_index;
3632 8df1cd07 bellard
    uint8_t *ptr;
3633 8df1cd07 bellard
    uint32_t val;
3634 8df1cd07 bellard
    unsigned long pd;
3635 8df1cd07 bellard
    PhysPageDesc *p;
3636 8df1cd07 bellard
3637 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3638 8df1cd07 bellard
    if (!p) {
3639 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3640 8df1cd07 bellard
    } else {
3641 8df1cd07 bellard
        pd = p->phys_offset;
3642 8df1cd07 bellard
    }
3643 3b46e624 ths
3644 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3645 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3646 8df1cd07 bellard
        /* I/O case */
3647 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3648 8da3ff18 pbrook
        if (p)
3649 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3650 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3651 8df1cd07 bellard
    } else {
3652 8df1cd07 bellard
        /* RAM case */
3653 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3654 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3655 8df1cd07 bellard
        val = ldl_p(ptr);
3656 8df1cd07 bellard
    }
3657 8df1cd07 bellard
    return val;
3658 8df1cd07 bellard
}
3659 8df1cd07 bellard
3660 84b7b8e7 bellard
/* warning: addr must be aligned */
3661 c227f099 Anthony Liguori
uint64_t ldq_phys(target_phys_addr_t addr)
3662 84b7b8e7 bellard
{
3663 84b7b8e7 bellard
    int io_index;
3664 84b7b8e7 bellard
    uint8_t *ptr;
3665 84b7b8e7 bellard
    uint64_t val;
3666 84b7b8e7 bellard
    unsigned long pd;
3667 84b7b8e7 bellard
    PhysPageDesc *p;
3668 84b7b8e7 bellard
3669 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3670 84b7b8e7 bellard
    if (!p) {
3671 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
3672 84b7b8e7 bellard
    } else {
3673 84b7b8e7 bellard
        pd = p->phys_offset;
3674 84b7b8e7 bellard
    }
3675 3b46e624 ths
3676 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3677 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3678 84b7b8e7 bellard
        /* I/O case */
3679 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3680 8da3ff18 pbrook
        if (p)
3681 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3682 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3683 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3684 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3685 84b7b8e7 bellard
#else
3686 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3687 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3688 84b7b8e7 bellard
#endif
3689 84b7b8e7 bellard
    } else {
3690 84b7b8e7 bellard
        /* RAM case */
3691 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3692 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3693 84b7b8e7 bellard
        val = ldq_p(ptr);
3694 84b7b8e7 bellard
    }
3695 84b7b8e7 bellard
    return val;
3696 84b7b8e7 bellard
}
3697 84b7b8e7 bellard
3698 aab33094 bellard
/* XXX: optimize */
3699 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3700 aab33094 bellard
{
3701 aab33094 bellard
    uint8_t val;
3702 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3703 aab33094 bellard
    return val;
3704 aab33094 bellard
}
3705 aab33094 bellard
3706 aab33094 bellard
/* XXX: optimize */
3707 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3708 aab33094 bellard
{
3709 aab33094 bellard
    uint16_t val;
3710 aab33094 bellard
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3711 aab33094 bellard
    return tswap16(val);
3712 aab33094 bellard
}
3713 aab33094 bellard
3714 8df1cd07 bellard
/* warning: addr must be aligned. The ram page is not masked as dirty
3715 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3716 8df1cd07 bellard
   bits are used to track modified PTEs */
3717 c227f099 Anthony Liguori
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3718 8df1cd07 bellard
{
3719 8df1cd07 bellard
    int io_index;
3720 8df1cd07 bellard
    uint8_t *ptr;
3721 8df1cd07 bellard
    unsigned long pd;
3722 8df1cd07 bellard
    PhysPageDesc *p;
3723 8df1cd07 bellard
3724 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3725 8df1cd07 bellard
    if (!p) {
3726 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3727 8df1cd07 bellard
    } else {
3728 8df1cd07 bellard
        pd = p->phys_offset;
3729 8df1cd07 bellard
    }
3730 3b46e624 ths
3731 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3732 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3733 8da3ff18 pbrook
        if (p)
3734 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3735 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3736 8df1cd07 bellard
    } else {
3737 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3738 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3739 8df1cd07 bellard
        stl_p(ptr, val);
3740 74576198 aliguori
3741 74576198 aliguori
        if (unlikely(in_migration)) {
3742 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3743 74576198 aliguori
                /* invalidate code */
3744 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3745 74576198 aliguori
                /* set dirty bit */
3746 74576198 aliguori
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3747 74576198 aliguori
                    (0xff & ~CODE_DIRTY_FLAG);
3748 74576198 aliguori
            }
3749 74576198 aliguori
        }
3750 8df1cd07 bellard
    }
3751 8df1cd07 bellard
}
3752 8df1cd07 bellard
3753 c227f099 Anthony Liguori
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    PhysPageDesc *p = phys_page_find(addr >> TARGET_PAGE_BITS);
    unsigned long pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* I/O case: the 64-bit store is issued as two 32-bit writes,
           ordered according to the target's endianness */
        int io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM case: store directly; dirty tracking is deliberately skipped */
        uint8_t *ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK)
            + (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
3784 bc98a7ef j_mayer
3785 8df1cd07 bellard
/* warning: addr must be aligned */
3786 c227f099 Anthony Liguori
void stl_phys(target_phys_addr_t addr, uint32_t val)
3787 8df1cd07 bellard
{
3788 8df1cd07 bellard
    int io_index;
3789 8df1cd07 bellard
    uint8_t *ptr;
3790 8df1cd07 bellard
    unsigned long pd;
3791 8df1cd07 bellard
    PhysPageDesc *p;
3792 8df1cd07 bellard
3793 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3794 8df1cd07 bellard
    if (!p) {
3795 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3796 8df1cd07 bellard
    } else {
3797 8df1cd07 bellard
        pd = p->phys_offset;
3798 8df1cd07 bellard
    }
3799 3b46e624 ths
3800 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3801 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3802 8da3ff18 pbrook
        if (p)
3803 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3804 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3805 8df1cd07 bellard
    } else {
3806 8df1cd07 bellard
        unsigned long addr1;
3807 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3808 8df1cd07 bellard
        /* RAM case */
3809 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3810 8df1cd07 bellard
        stl_p(ptr, val);
3811 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3812 3a7d929e bellard
            /* invalidate code */
3813 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3814 3a7d929e bellard
            /* set dirty bit */
3815 f23db169 bellard
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3816 f23db169 bellard
                (0xff & ~CODE_DIRTY_FLAG);
3817 3a7d929e bellard
        }
3818 8df1cd07 bellard
    }
3819 8df1cd07 bellard
}
3820 8df1cd07 bellard
3821 aab33094 bellard
/* XXX: optimize */
3822 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
3823 aab33094 bellard
{
3824 aab33094 bellard
    uint8_t v = val;
3825 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3826 aab33094 bellard
}
3827 aab33094 bellard
3828 aab33094 bellard
/* XXX: optimize */
3829 c227f099 Anthony Liguori
void stw_phys(target_phys_addr_t addr, uint32_t val)
3830 aab33094 bellard
{
3831 aab33094 bellard
    uint16_t v = tswap16(val);
3832 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3833 aab33094 bellard
}
3834 aab33094 bellard
3835 aab33094 bellard
/* XXX: optimize */
3836 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
3837 aab33094 bellard
{
3838 aab33094 bellard
    val = tswap64(val);
3839 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3840 aab33094 bellard
}
3841 aab33094 bellard
3842 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3843 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3844 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3845 13eb76e0 bellard
{
3846 13eb76e0 bellard
    int l;
3847 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
3848 9b3c35e0 j_mayer
    target_ulong page;
3849 13eb76e0 bellard
3850 13eb76e0 bellard
    while (len > 0) {
3851 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3852 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3853 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3854 13eb76e0 bellard
        if (phys_addr == -1)
3855 13eb76e0 bellard
            return -1;
3856 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3857 13eb76e0 bellard
        if (l > len)
3858 13eb76e0 bellard
            l = len;
3859 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3860 5e2972fd aliguori
        if (is_write)
3861 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3862 5e2972fd aliguori
        else
3863 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3864 13eb76e0 bellard
        len -= l;
3865 13eb76e0 bellard
        buf += l;
3866 13eb76e0 bellard
        addr += l;
3867 13eb76e0 bellard
    }
3868 13eb76e0 bellard
    return 0;
3869 13eb76e0 bellard
}
3870 a68fe89c Paul Brook
#endif
3871 13eb76e0 bellard
3872 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Locate the translation block that contains the host return
       address we faulted from.  */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
                  retaddr);
    }
    /* n starts as the icount budget at TB entry: pending low counter
       plus the instruction count of this TB.  */
    n = env->icount_decr.u16.low + tb->icount;
    /* Restore the guest CPU state to the faulting instruction.  */
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Back up to the branch and credit one instruction back to
           the icount low counter.  */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        /* Same rollback for SH4's 2-byte instructions.  */
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate with a count limit of n and CF_LAST_IO so the I/O
       instruction ends the new TB.  */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
3930 2e70f6ef pbrook
3931 b3755a91 Paul Brook
#if !defined(CONFIG_USER_ONLY)
3932 b3755a91 Paul Brook
3933 e3db7226 bellard
/* Print translation-cache statistics (TB sizes, cross-page TBs, direct
   jump chaining) to f via cpu_fprintf, followed by global flush/invalidate
   counters and the TCG backend's own statistics.  */
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Accumulate per-TB statistics over the whole translation buffer.  */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* A second page address means the TB spans a page boundary.  */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks "no direct jump patched" for that exit.  */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n", 
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    /* Delegate code-generator statistics to TCG itself.  */
    tcg_dump_info(f, cpu_fprintf);
}
3985 e3db7226 bellard
3986 61382a50 bellard
#define MMUSUFFIX _cmmu
3987 61382a50 bellard
#define GETPC() NULL
3988 61382a50 bellard
#define env cpu_single_env
3989 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
3990 61382a50 bellard
3991 61382a50 bellard
#define SHIFT 0
3992 61382a50 bellard
#include "softmmu_template.h"
3993 61382a50 bellard
3994 61382a50 bellard
#define SHIFT 1
3995 61382a50 bellard
#include "softmmu_template.h"
3996 61382a50 bellard
3997 61382a50 bellard
#define SHIFT 2
3998 61382a50 bellard
#include "softmmu_template.h"
3999 61382a50 bellard
4000 61382a50 bellard
#define SHIFT 3
4001 61382a50 bellard
#include "softmmu_template.h"
4002 61382a50 bellard
4003 61382a50 bellard
#undef env
4004 61382a50 bellard
4005 61382a50 bellard
#endif