exec.c @ 6c712321
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

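/* Editor's illustrative sketch (not part of the original exec.c): the
   RAMBlock list above maps a contiguous range of ram_addr_t offsets to a
   host buffer.  A minimal lookup under that assumption could look like the
   snippet below; the real helpers in QEMU do more, and the TODO above notes
   that contiguity stops holding once RAM deallocation is implemented. */
#if 0
static uint8_t *example_ram_host_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    for (block = ram_blocks; block != NULL; block = block->next) {
        if (addr >= block->offset && addr < block->offset + block->length)
            return block->host + (addr - block->offset);   /* host pointer */
    }
    return NULL;   /* offset not backed by any registered block */
}
#endif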
CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

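/* Editor's illustrative sketch (not part of the original exec.c): map_exec()
   rounds a buffer out to host page boundaries and marks it executable; this
   is the same call code_gen_alloc() below uses on statically allocated code
   buffers. */
#if 0
static void example_make_executable(void)
{
    static uint8_t buf[4096];

    map_exec(buf, sizeof(buf));   /* buf may now hold generated code */
}
#endif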
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

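/* Editor's illustrative sketch (not part of the original exec.c): a typical
   lookup on the physical page table.  A NULL result stands for a page that
   was never registered, which callers treat as IO_MEM_UNASSIGNED. */
#if 0
static ram_addr_t example_phys_offset(target_phys_addr_t paddr)
{
    PhysPageDesc *pd = phys_page_find(paddr >> TARGET_PAGE_BITS);

    if (!pd)
        return IO_MEM_UNASSIGNED;
    return pd->phys_offset;   /* page offset, io_index in the low bits */
}
#endif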
#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

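/* Editor's illustrative sketch (not part of the original exec.c): per the
   comment above, the front end calls cpu_exec_init_all() once before any CPU
   is created; passing 0 requests the default translation buffer size. */
#if 0
static void example_startup(void)
{
    cpu_exec_init_all(0);   /* allocate code_gen_buffer, init page/io tables */
}
#endif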
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

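/* Editor's illustrative sketch (not part of the original exec.c):
   qemu_get_cpu() walks the first_cpu list linearly and returns NULL for an
   unknown index, so callers must check the result. */
#if 0
static int example_cpu_exists(int index)
{
    return qemu_get_cpu(index) != NULL;
}
#endif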
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

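/* Editor's note (not part of the original exec.c): the page_next[],
   jmp_next[] and jmp_first links used above store a tag in the two low bits
   of the pointer: 0 or 1 names the slot of the pointed-to TB that the link
   belongs to, and 2 marks the head/end of the circular jump list.  A minimal
   decoding sketch: */
#if 0
static inline TranslationBlock *example_untag_tb(TranslationBlock *tagged,
                                                 unsigned int *slot)
{
    *slot = (long)tagged & 3;                       /* 0, 1 or 2 */
    return (TranslationBlock *)((long)tagged & ~3); /* the real pointer */
}
#endif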
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

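/* Editor's worked example (not part of the original exec.c): set_bits() marks
   a run of bits in a byte bitmap.  set_bits(bitmap, 3, 7) covers bits 3..9:
   it ORs 0xf8 into byte 0 (bits 3-7) and 0x03 into byte 1 (bits 8-9). */
#if 0
static void example_set_bits(void)
{
    uint8_t bitmap[2] = { 0, 0 };

    set_bits(bitmap, 3, 7);
    /* now bitmap[0] == 0xf8 and bitmap[1] == 0x03 */
}
#endif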
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

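/* Editor's illustrative sketch (not part of the original exec.c): the
   self-modifying-code paths below call tb_gen_code() with cflags == 1 so that
   the regenerated block contains just the single instruction doing the write;
   the CF_COUNT_MASK bits of cflags carry that instruction-count limit. */
#if 0
static void example_regen_single_insn_tb(CPUState *env, target_ulong pc,
                                         target_ulong cs_base, int flags)
{
    (void)tb_gen_code(env, pc, cs_base, flags, 1);
}
#endif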
/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

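/* Editor's worked example (not part of the original exec.c): the bitmap test
   above for a 4-byte write at page offset 0x123: offset >> 3 == 0x24 picks
   the bitmap byte, offset & 7 == 3 aligns bit 0 with the write address, and
   (1 << 4) - 1 == 0xf masks the four bytes covered by the write. */
#if 0
static int example_write_hits_code(PageDesc *p, int offset, int len)
{
    int b = p->code_bitmap[offset >> 3] >> (offset & 7);

    return (b & ((1 << len) - 1)) != 0;
}
#endif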
1055 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1056 c227f099 Anthony Liguori
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1057 d720b93d bellard
                                    unsigned long pc, void *puc)
1058 9fa3e853 bellard
{
1059 6b917547 aliguori
    TranslationBlock *tb;
1060 9fa3e853 bellard
    PageDesc *p;
1061 6b917547 aliguori
    int n;
1062 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1063 6b917547 aliguori
    TranslationBlock *current_tb = NULL;
1064 d720b93d bellard
    CPUState *env = cpu_single_env;
1065 6b917547 aliguori
    int current_tb_modified = 0;
1066 6b917547 aliguori
    target_ulong current_pc = 0;
1067 6b917547 aliguori
    target_ulong current_cs_base = 0;
1068 6b917547 aliguori
    int current_flags = 0;
1069 d720b93d bellard
#endif
1070 9fa3e853 bellard
1071 9fa3e853 bellard
    addr &= TARGET_PAGE_MASK;
1072 9fa3e853 bellard
    p = page_find(addr >> TARGET_PAGE_BITS);
1073 5fafdf24 ths
    if (!p)
1074 9fa3e853 bellard
        return;
1075 9fa3e853 bellard
    tb = p->first_tb;
1076 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1077 d720b93d bellard
    if (tb && pc != 0) {
1078 d720b93d bellard
        current_tb = tb_find_pc(pc);
1079 d720b93d bellard
    }
1080 d720b93d bellard
#endif
1081 9fa3e853 bellard
    while (tb != NULL) {
1082 9fa3e853 bellard
        n = (long)tb & 3;
1083 9fa3e853 bellard
        tb = (TranslationBlock *)((long)tb & ~3);
1084 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1085 d720b93d bellard
        if (current_tb == tb &&
1086 2e70f6ef pbrook
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1087 d720b93d bellard
                /* If we are modifying the current TB, we must stop
1088 d720b93d bellard
                   its execution. We could be more precise by checking
1089 d720b93d bellard
                   that the modification is after the current PC, but it
1090 d720b93d bellard
                   would require a specialized function to partially
1091 d720b93d bellard
                   restore the CPU state */
1092 3b46e624 ths
1093 d720b93d bellard
            current_tb_modified = 1;
1094 d720b93d bellard
            cpu_restore_state(current_tb, env, pc, puc);
1095 6b917547 aliguori
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1096 6b917547 aliguori
                                 &current_flags);
1097 d720b93d bellard
        }
1098 d720b93d bellard
#endif /* TARGET_HAS_PRECISE_SMC */
1099 9fa3e853 bellard
        tb_phys_invalidate(tb, addr);
1100 9fa3e853 bellard
        tb = tb->page_next[n];
1101 9fa3e853 bellard
    }
1102 fd6ce8f6 bellard
    p->first_tb = NULL;
1103 d720b93d bellard
#ifdef TARGET_HAS_PRECISE_SMC
1104 d720b93d bellard
    if (current_tb_modified) {
1105 d720b93d bellard
        /* we generate a block containing just the instruction
1106 d720b93d bellard
           modifying the memory. This ensures that the block cannot modify
1107 d720b93d bellard
           itself */
1108 ea1c1802 bellard
        env->current_tb = NULL;
1109 2e70f6ef pbrook
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1110 d720b93d bellard
        cpu_resume_from_signal(env, puc);
1111 d720b93d bellard
    }
1112 d720b93d bellard
#endif
1113 fd6ce8f6 bellard
}
1114 9fa3e853 bellard
#endif
1115 fd6ce8f6 bellard
1116 fd6ce8f6 bellard
/* add the tb to the target page and protect it if necessary */
1117 5fafdf24 ths
static inline void tb_alloc_page(TranslationBlock *tb,
1118 53a5960a pbrook
                                 unsigned int n, target_ulong page_addr)
1119 fd6ce8f6 bellard
{
1120 fd6ce8f6 bellard
    PageDesc *p;
1121 9fa3e853 bellard
    TranslationBlock *last_first_tb;
1122 9fa3e853 bellard
1123 9fa3e853 bellard
    tb->page_addr[n] = page_addr;
1124 3a7d929e bellard
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1125 9fa3e853 bellard
    tb->page_next[n] = p->first_tb;
1126 9fa3e853 bellard
    last_first_tb = p->first_tb;
1127 9fa3e853 bellard
    p->first_tb = (TranslationBlock *)((long)tb | n);
1128 9fa3e853 bellard
    invalidate_page_bitmap(p);
1129 fd6ce8f6 bellard
1130 107db443 bellard
#if defined(TARGET_HAS_SMC) || 1
1131 d720b93d bellard
1132 9fa3e853 bellard
#if defined(CONFIG_USER_ONLY)
1133 fd6ce8f6 bellard
    if (p->flags & PAGE_WRITE) {
1134 53a5960a pbrook
        target_ulong addr;
1135 53a5960a pbrook
        PageDesc *p2;
1136 9fa3e853 bellard
        int prot;
1137 9fa3e853 bellard
1138 fd6ce8f6 bellard
        /* force the host page to be non-writable (writes will have a
1139 fd6ce8f6 bellard
           page fault + mprotect overhead) */
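        /* For illustration: with 4 KiB target pages on a host using 64 KiB
           pages, the loop below ORs together the flags of all 16 target pages
           before write-protecting the whole host page with mprotect(). */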
1140 53a5960a pbrook
        page_addr &= qemu_host_page_mask;
1141 fd6ce8f6 bellard
        prot = 0;
1142 53a5960a pbrook
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1143 53a5960a pbrook
            addr += TARGET_PAGE_SIZE) {
1144 53a5960a pbrook
1145 53a5960a pbrook
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1146 53a5960a pbrook
            if (!p2)
1147 53a5960a pbrook
                continue;
1148 53a5960a pbrook
            prot |= p2->flags;
1149 53a5960a pbrook
            p2->flags &= ~PAGE_WRITE;
1150 53a5960a pbrook
            page_get_flags(addr);
1151 53a5960a pbrook
          }
1152 5fafdf24 ths
        mprotect(g2h(page_addr), qemu_host_page_size,
1153 fd6ce8f6 bellard
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1154 fd6ce8f6 bellard
#ifdef DEBUG_TB_INVALIDATE
1155 ab3d1727 blueswir1
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1156 53a5960a pbrook
               page_addr);
1157 fd6ce8f6 bellard
#endif
1158 fd6ce8f6 bellard
    }
1159 9fa3e853 bellard
#else
1160 9fa3e853 bellard
    /* if some code is already present, then the pages are already
1161 9fa3e853 bellard
       protected. So we handle the case where only the first TB is
1162 9fa3e853 bellard
       allocated in a physical page */
1163 9fa3e853 bellard
    if (!last_first_tb) {
1164 6a00d601 bellard
        tlb_protect_code(page_addr);
1165 9fa3e853 bellard
    }
1166 9fa3e853 bellard
#endif
1167 d720b93d bellard
1168 d720b93d bellard
#endif /* TARGET_HAS_SMC */
1169 fd6ce8f6 bellard
}
1170 fd6ce8f6 bellard
1171 fd6ce8f6 bellard
/* Allocate a new translation block. Flush the translation buffer if
1172 fd6ce8f6 bellard
   too many translation blocks or too much generated code. */
1173 c27004ec bellard
TranslationBlock *tb_alloc(target_ulong pc)
1174 fd6ce8f6 bellard
{
1175 fd6ce8f6 bellard
    TranslationBlock *tb;
1176 fd6ce8f6 bellard
1177 26a5f13b bellard
    if (nb_tbs >= code_gen_max_blocks ||
1178 26a5f13b bellard
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1179 d4e8164f bellard
        return NULL;
1180 fd6ce8f6 bellard
    tb = &tbs[nb_tbs++];
1181 fd6ce8f6 bellard
    tb->pc = pc;
1182 b448f2f3 bellard
    tb->cflags = 0;
1183 d4e8164f bellard
    return tb;
1184 d4e8164f bellard
}
1185 d4e8164f bellard
1186 2e70f6ef pbrook
void tb_free(TranslationBlock *tb)
1187 2e70f6ef pbrook
{
1188 bf20dc07 ths
    /* In practice this is mostly used for single-use temporary TBs.
1189 2e70f6ef pbrook
       Ignore the hard cases and just back up if this TB happens to
1190 2e70f6ef pbrook
       be the last one generated.  */
1191 2e70f6ef pbrook
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1192 2e70f6ef pbrook
        code_gen_ptr = tb->tc_ptr;
1193 2e70f6ef pbrook
        nb_tbs--;
1194 2e70f6ef pbrook
    }
1195 2e70f6ef pbrook
}
1196 2e70f6ef pbrook
1197 9fa3e853 bellard
/* add a new TB and link it to the physical page tables. phys_page2 is
1198 9fa3e853 bellard
   (-1) to indicate that only one page contains the TB. */
1199 5fafdf24 ths
void tb_link_phys(TranslationBlock *tb,
1200 9fa3e853 bellard
                  target_ulong phys_pc, target_ulong phys_page2)
1201 d4e8164f bellard
{
1202 9fa3e853 bellard
    unsigned int h;
1203 9fa3e853 bellard
    TranslationBlock **ptb;
1204 9fa3e853 bellard
1205 c8a706fe pbrook
    /* Grab the mmap lock to stop another thread invalidating this TB
1206 c8a706fe pbrook
       before we are done.  */
1207 c8a706fe pbrook
    mmap_lock();
1208 9fa3e853 bellard
    /* add in the physical hash table */
1209 9fa3e853 bellard
    h = tb_phys_hash_func(phys_pc);
1210 9fa3e853 bellard
    ptb = &tb_phys_hash[h];
1211 9fa3e853 bellard
    tb->phys_hash_next = *ptb;
1212 9fa3e853 bellard
    *ptb = tb;
1213 fd6ce8f6 bellard
1214 fd6ce8f6 bellard
    /* add in the page list */
1215 9fa3e853 bellard
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1216 9fa3e853 bellard
    if (phys_page2 != -1)
1217 9fa3e853 bellard
        tb_alloc_page(tb, 1, phys_page2);
1218 9fa3e853 bellard
    else
1219 9fa3e853 bellard
        tb->page_addr[1] = -1;
1220 9fa3e853 bellard
1221 d4e8164f bellard
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
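    /* The low 2 bits tag this pointer as well: the value 2 marks the owning
       TB, i.e. the point where walks of the list of TBs jumping into this one
       stop (see tb_reset_jump_recursive2()). */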
1222 d4e8164f bellard
    tb->jmp_next[0] = NULL;
1223 d4e8164f bellard
    tb->jmp_next[1] = NULL;
1224 d4e8164f bellard
1225 d4e8164f bellard
    /* init original jump addresses */
1226 d4e8164f bellard
    if (tb->tb_next_offset[0] != 0xffff)
1227 d4e8164f bellard
        tb_reset_jump(tb, 0);
1228 d4e8164f bellard
    if (tb->tb_next_offset[1] != 0xffff)
1229 d4e8164f bellard
        tb_reset_jump(tb, 1);
1230 8a40a180 bellard
1231 8a40a180 bellard
#ifdef DEBUG_TB_CHECK
1232 8a40a180 bellard
    tb_page_check();
1233 8a40a180 bellard
#endif
1234 c8a706fe pbrook
    mmap_unlock();
1235 fd6ce8f6 bellard
}
1236 fd6ce8f6 bellard
1237 9fa3e853 bellard
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238 9fa3e853 bellard
   tb[1].tc_ptr. Return NULL if not found */
1239 9fa3e853 bellard
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1240 fd6ce8f6 bellard
{
1241 9fa3e853 bellard
    int m_min, m_max, m;
1242 9fa3e853 bellard
    unsigned long v;
1243 9fa3e853 bellard
    TranslationBlock *tb;
1244 a513fe19 bellard
1245 a513fe19 bellard
    if (nb_tbs <= 0)
1246 a513fe19 bellard
        return NULL;
1247 a513fe19 bellard
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1248 a513fe19 bellard
        tc_ptr >= (unsigned long)code_gen_ptr)
1249 a513fe19 bellard
        return NULL;
1250 a513fe19 bellard
    /* binary search (cf Knuth) */
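    /* tbs[] entries are handed out in code_gen_buffer order, so tc_ptr is
       monotonically increasing and a binary search over [0, nb_tbs) is valid. */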
1251 a513fe19 bellard
    m_min = 0;
1252 a513fe19 bellard
    m_max = nb_tbs - 1;
1253 a513fe19 bellard
    while (m_min <= m_max) {
1254 a513fe19 bellard
        m = (m_min + m_max) >> 1;
1255 a513fe19 bellard
        tb = &tbs[m];
1256 a513fe19 bellard
        v = (unsigned long)tb->tc_ptr;
1257 a513fe19 bellard
        if (v == tc_ptr)
1258 a513fe19 bellard
            return tb;
1259 a513fe19 bellard
        else if (tc_ptr < v) {
1260 a513fe19 bellard
            m_max = m - 1;
1261 a513fe19 bellard
        } else {
1262 a513fe19 bellard
            m_min = m + 1;
1263 a513fe19 bellard
        }
1264 5fafdf24 ths
    }
1265 a513fe19 bellard
    return &tbs[m_max];
1266 a513fe19 bellard
}
1267 7501267e bellard
1268 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb);
1269 ea041c0e bellard
1270 ea041c0e bellard
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1271 ea041c0e bellard
{
1272 ea041c0e bellard
    TranslationBlock *tb1, *tb_next, **ptb;
1273 ea041c0e bellard
    unsigned int n1;
1274 ea041c0e bellard
1275 ea041c0e bellard
    tb1 = tb->jmp_next[n];
1276 ea041c0e bellard
    if (tb1 != NULL) {
1277 ea041c0e bellard
        /* find head of list */
1278 ea041c0e bellard
        for(;;) {
1279 ea041c0e bellard
            n1 = (long)tb1 & 3;
1280 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281 ea041c0e bellard
            if (n1 == 2)
1282 ea041c0e bellard
                break;
1283 ea041c0e bellard
            tb1 = tb1->jmp_next[n1];
1284 ea041c0e bellard
        }
1285 ea041c0e bellard
        /* we are now sure that tb jumps to tb1 */
1286 ea041c0e bellard
        tb_next = tb1;
1287 ea041c0e bellard
1288 ea041c0e bellard
        /* remove tb from the jmp_first list */
1289 ea041c0e bellard
        ptb = &tb_next->jmp_first;
1290 ea041c0e bellard
        for(;;) {
1291 ea041c0e bellard
            tb1 = *ptb;
1292 ea041c0e bellard
            n1 = (long)tb1 & 3;
1293 ea041c0e bellard
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1294 ea041c0e bellard
            if (n1 == n && tb1 == tb)
1295 ea041c0e bellard
                break;
1296 ea041c0e bellard
            ptb = &tb1->jmp_next[n1];
1297 ea041c0e bellard
        }
1298 ea041c0e bellard
        *ptb = tb->jmp_next[n];
1299 ea041c0e bellard
        tb->jmp_next[n] = NULL;
1300 3b46e624 ths
1301 ea041c0e bellard
        /* suppress the jump to next tb in generated code */
1302 ea041c0e bellard
        tb_reset_jump(tb, n);
1303 ea041c0e bellard
1304 0124311e bellard
        /* suppress jumps in the tb we could have jumped to */
1305 ea041c0e bellard
        tb_reset_jump_recursive(tb_next);
1306 ea041c0e bellard
    }
1307 ea041c0e bellard
}
1308 ea041c0e bellard
1309 ea041c0e bellard
static void tb_reset_jump_recursive(TranslationBlock *tb)
1310 ea041c0e bellard
{
1311 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 0);
1312 ea041c0e bellard
    tb_reset_jump_recursive2(tb, 1);
1313 ea041c0e bellard
}
1314 ea041c0e bellard
1315 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
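/* Map the breakpoint's virtual pc to its ram address and invalidate any TB
   covering that byte, so the code is retranslated (and the breakpoint seen)
   the next time it is executed. */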
1316 d720b93d bellard
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1317 d720b93d bellard
{
1318 c227f099 Anthony Liguori
    target_phys_addr_t addr;
1319 9b3c35e0 j_mayer
    target_ulong pd;
1320 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
1321 c2f07f81 pbrook
    PhysPageDesc *p;
1322 d720b93d bellard
1323 c2f07f81 pbrook
    addr = cpu_get_phys_page_debug(env, pc);
1324 c2f07f81 pbrook
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1325 c2f07f81 pbrook
    if (!p) {
1326 c2f07f81 pbrook
        pd = IO_MEM_UNASSIGNED;
1327 c2f07f81 pbrook
    } else {
1328 c2f07f81 pbrook
        pd = p->phys_offset;
1329 c2f07f81 pbrook
    }
1330 c2f07f81 pbrook
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1331 706cd4b5 pbrook
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1332 d720b93d bellard
}
1333 c27004ec bellard
#endif
1334 d720b93d bellard
1335 6658ffb8 pbrook
/* Add a watchpoint.  */
1336 a1d1bb31 aliguori
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1337 a1d1bb31 aliguori
                          int flags, CPUWatchpoint **watchpoint)
1338 6658ffb8 pbrook
{
1339 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1340 c0ce998e aliguori
    CPUWatchpoint *wp;
1341 6658ffb8 pbrook
1342 b4051334 aliguori
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
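    /* e.g. len = 4 gives len_mask = ~3: the check below then rejects any len
       other than 1, 2, 4 or 8 and any addr not aligned to that length. */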
1343 b4051334 aliguori
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1344 b4051334 aliguori
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1345 b4051334 aliguori
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1346 b4051334 aliguori
        return -EINVAL;
1347 b4051334 aliguori
    }
1348 a1d1bb31 aliguori
    wp = qemu_malloc(sizeof(*wp));
1349 a1d1bb31 aliguori
1350 a1d1bb31 aliguori
    wp->vaddr = addr;
1351 b4051334 aliguori
    wp->len_mask = len_mask;
1352 a1d1bb31 aliguori
    wp->flags = flags;
1353 a1d1bb31 aliguori
1354 2dc9f411 aliguori
    /* keep all GDB-injected watchpoints in front */
1355 c0ce998e aliguori
    if (flags & BP_GDB)
1356 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1357 c0ce998e aliguori
    else
1358 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1359 6658ffb8 pbrook
1360 6658ffb8 pbrook
    tlb_flush_page(env, addr);
1361 a1d1bb31 aliguori
1362 a1d1bb31 aliguori
    if (watchpoint)
1363 a1d1bb31 aliguori
        *watchpoint = wp;
1364 a1d1bb31 aliguori
    return 0;
1365 6658ffb8 pbrook
}
1366 6658ffb8 pbrook
1367 a1d1bb31 aliguori
/* Remove a specific watchpoint.  */
1368 a1d1bb31 aliguori
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1369 a1d1bb31 aliguori
                          int flags)
1370 6658ffb8 pbrook
{
1371 b4051334 aliguori
    target_ulong len_mask = ~(len - 1);
1372 a1d1bb31 aliguori
    CPUWatchpoint *wp;
1373 6658ffb8 pbrook
1374 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1375 b4051334 aliguori
        if (addr == wp->vaddr && len_mask == wp->len_mask
1376 6e140f28 aliguori
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1377 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1378 6658ffb8 pbrook
            return 0;
1379 6658ffb8 pbrook
        }
1380 6658ffb8 pbrook
    }
1381 a1d1bb31 aliguori
    return -ENOENT;
1382 6658ffb8 pbrook
}
1383 6658ffb8 pbrook
1384 a1d1bb31 aliguori
/* Remove a specific watchpoint by reference.  */
1385 a1d1bb31 aliguori
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1386 a1d1bb31 aliguori
{
1387 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1388 7d03f82f edgar_igl
1389 a1d1bb31 aliguori
    tlb_flush_page(env, watchpoint->vaddr);
1390 a1d1bb31 aliguori
1391 a1d1bb31 aliguori
    qemu_free(watchpoint);
1392 a1d1bb31 aliguori
}
1393 a1d1bb31 aliguori
1394 a1d1bb31 aliguori
/* Remove all matching watchpoints.  */
1395 a1d1bb31 aliguori
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1396 a1d1bb31 aliguori
{
1397 c0ce998e aliguori
    CPUWatchpoint *wp, *next;
1398 a1d1bb31 aliguori
1399 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1400 a1d1bb31 aliguori
        if (wp->flags & mask)
1401 a1d1bb31 aliguori
            cpu_watchpoint_remove_by_ref(env, wp);
1402 c0ce998e aliguori
    }
1403 7d03f82f edgar_igl
}
1404 7d03f82f edgar_igl
1405 a1d1bb31 aliguori
/* Add a breakpoint.  */
1406 a1d1bb31 aliguori
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1407 a1d1bb31 aliguori
                          CPUBreakpoint **breakpoint)
1408 4c3a88a2 bellard
{
1409 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1410 c0ce998e aliguori
    CPUBreakpoint *bp;
1411 3b46e624 ths
1412 a1d1bb31 aliguori
    bp = qemu_malloc(sizeof(*bp));
1413 4c3a88a2 bellard
1414 a1d1bb31 aliguori
    bp->pc = pc;
1415 a1d1bb31 aliguori
    bp->flags = flags;
1416 a1d1bb31 aliguori
1417 2dc9f411 aliguori
    /* keep all GDB-injected breakpoints in front */
1418 c0ce998e aliguori
    if (flags & BP_GDB)
1419 72cf2d4f Blue Swirl
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1420 c0ce998e aliguori
    else
1421 72cf2d4f Blue Swirl
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1422 3b46e624 ths
1423 d720b93d bellard
    breakpoint_invalidate(env, pc);
1424 a1d1bb31 aliguori
1425 a1d1bb31 aliguori
    if (breakpoint)
1426 a1d1bb31 aliguori
        *breakpoint = bp;
1427 4c3a88a2 bellard
    return 0;
1428 4c3a88a2 bellard
#else
1429 a1d1bb31 aliguori
    return -ENOSYS;
1430 4c3a88a2 bellard
#endif
1431 4c3a88a2 bellard
}
1432 4c3a88a2 bellard
1433 a1d1bb31 aliguori
/* Remove a specific breakpoint.  */
1434 a1d1bb31 aliguori
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1435 a1d1bb31 aliguori
{
1436 7d03f82f edgar_igl
#if defined(TARGET_HAS_ICE)
1437 a1d1bb31 aliguori
    CPUBreakpoint *bp;
1438 a1d1bb31 aliguori
1439 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1440 a1d1bb31 aliguori
        if (bp->pc == pc && bp->flags == flags) {
1441 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1442 a1d1bb31 aliguori
            return 0;
1443 a1d1bb31 aliguori
        }
1444 7d03f82f edgar_igl
    }
1445 a1d1bb31 aliguori
    return -ENOENT;
1446 a1d1bb31 aliguori
#else
1447 a1d1bb31 aliguori
    return -ENOSYS;
1448 7d03f82f edgar_igl
#endif
1449 7d03f82f edgar_igl
}
1450 7d03f82f edgar_igl
1451 a1d1bb31 aliguori
/* Remove a specific breakpoint by reference.  */
1452 a1d1bb31 aliguori
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1453 4c3a88a2 bellard
{
1454 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1455 72cf2d4f Blue Swirl
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1456 d720b93d bellard
1457 a1d1bb31 aliguori
    breakpoint_invalidate(env, breakpoint->pc);
1458 a1d1bb31 aliguori
1459 a1d1bb31 aliguori
    qemu_free(breakpoint);
1460 a1d1bb31 aliguori
#endif
1461 a1d1bb31 aliguori
}
1462 a1d1bb31 aliguori
1463 a1d1bb31 aliguori
/* Remove all matching breakpoints. */
1464 a1d1bb31 aliguori
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1465 a1d1bb31 aliguori
{
1466 a1d1bb31 aliguori
#if defined(TARGET_HAS_ICE)
1467 c0ce998e aliguori
    CPUBreakpoint *bp, *next;
1468 a1d1bb31 aliguori
1469 72cf2d4f Blue Swirl
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1470 a1d1bb31 aliguori
        if (bp->flags & mask)
1471 a1d1bb31 aliguori
            cpu_breakpoint_remove_by_ref(env, bp);
1472 c0ce998e aliguori
    }
1473 4c3a88a2 bellard
#endif
1474 4c3a88a2 bellard
}
1475 4c3a88a2 bellard
1476 c33a346e bellard
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1477 c33a346e bellard
   CPU loop after each instruction */
1478 c33a346e bellard
void cpu_single_step(CPUState *env, int enabled)
1479 c33a346e bellard
{
1480 1fddef4b bellard
#if defined(TARGET_HAS_ICE)
1481 c33a346e bellard
    if (env->singlestep_enabled != enabled) {
1482 c33a346e bellard
        env->singlestep_enabled = enabled;
1483 e22a25c9 aliguori
        if (kvm_enabled())
1484 e22a25c9 aliguori
            kvm_update_guest_debug(env, 0);
1485 e22a25c9 aliguori
        else {
1486 ccbb4d44 Stuart Brady
            /* must flush all the translated code to avoid inconsistencies */
1487 e22a25c9 aliguori
            /* XXX: only flush what is necessary */
1488 e22a25c9 aliguori
            tb_flush(env);
1489 e22a25c9 aliguori
        }
1490 c33a346e bellard
    }
1491 c33a346e bellard
#endif
1492 c33a346e bellard
}
1493 c33a346e bellard
1494 34865134 bellard
/* enable or disable low-level logging */
1495 34865134 bellard
void cpu_set_log(int log_flags)
1496 34865134 bellard
{
1497 34865134 bellard
    loglevel = log_flags;
1498 34865134 bellard
    if (loglevel && !logfile) {
1499 11fcfab4 pbrook
        logfile = fopen(logfilename, log_append ? "a" : "w");
1500 34865134 bellard
        if (!logfile) {
1501 34865134 bellard
            perror(logfilename);
1502 34865134 bellard
            _exit(1);
1503 34865134 bellard
        }
1504 9fa3e853 bellard
#if !defined(CONFIG_SOFTMMU)
1505 9fa3e853 bellard
        /* avoid glibc allocating the stream buffer with mmap() by setting one "by hand" */
1506 9fa3e853 bellard
        {
1507 b55266b5 blueswir1
            static char logfile_buf[4096];
1508 9fa3e853 bellard
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1509 9fa3e853 bellard
        }
1510 bf65f53f Filip Navara
#elif !defined(_WIN32)
1511 bf65f53f Filip Navara
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1512 34865134 bellard
        setvbuf(logfile, NULL, _IOLBF, 0);
1513 9fa3e853 bellard
#endif
1514 e735b91c pbrook
        log_append = 1;
1515 e735b91c pbrook
    }
1516 e735b91c pbrook
    if (!loglevel && logfile) {
1517 e735b91c pbrook
        fclose(logfile);
1518 e735b91c pbrook
        logfile = NULL;
1519 34865134 bellard
    }
1520 34865134 bellard
}
1521 34865134 bellard
1522 34865134 bellard
void cpu_set_log_filename(const char *filename)
1523 34865134 bellard
{
1524 34865134 bellard
    logfilename = strdup(filename);
1525 e735b91c pbrook
    if (logfile) {
1526 e735b91c pbrook
        fclose(logfile);
1527 e735b91c pbrook
        logfile = NULL;
1528 e735b91c pbrook
    }
1529 e735b91c pbrook
    cpu_set_log(loglevel);
1530 34865134 bellard
}
1531 c33a346e bellard
1532 3098dba0 aurel32
static void cpu_unlink_tb(CPUState *env)
1533 ea041c0e bellard
{
1534 3098dba0 aurel32
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1535 3098dba0 aurel32
       problem and hope the cpu will stop of its own accord.  For userspace
1536 3098dba0 aurel32
       emulation this often isn't actually as bad as it sounds.  Often
1537 3098dba0 aurel32
       signals are used primarily to interrupt blocking syscalls.  */
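    /* Un-chaining the current TB makes execution drop back to the main
       cpu_exec() loop at the next block boundary, where pending interrupt and
       exit requests are noticed. */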
1538 ea041c0e bellard
    TranslationBlock *tb;
1539 c227f099 Anthony Liguori
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1540 59817ccb bellard
1541 cab1b4bd Riku Voipio
    spin_lock(&interrupt_lock);
1542 3098dba0 aurel32
    tb = env->current_tb;
1543 3098dba0 aurel32
    /* if the cpu is currently executing code, we must unlink it and
1544 3098dba0 aurel32
       all the potentially executing TB */
1545 f76cfe56 Riku Voipio
    if (tb) {
1546 3098dba0 aurel32
        env->current_tb = NULL;
1547 3098dba0 aurel32
        tb_reset_jump_recursive(tb);
1548 be214e6c aurel32
    }
1549 cab1b4bd Riku Voipio
    spin_unlock(&interrupt_lock);
1550 3098dba0 aurel32
}
1551 3098dba0 aurel32
1552 3098dba0 aurel32
/* mask must never be zero, except for A20 change call */
1553 3098dba0 aurel32
void cpu_interrupt(CPUState *env, int mask)
1554 3098dba0 aurel32
{
1555 3098dba0 aurel32
    int old_mask;
1556 be214e6c aurel32
1557 2e70f6ef pbrook
    old_mask = env->interrupt_request;
1558 68a79315 bellard
    env->interrupt_request |= mask;
1559 3098dba0 aurel32
1560 8edac960 aliguori
#ifndef CONFIG_USER_ONLY
1561 8edac960 aliguori
    /*
1562 8edac960 aliguori
     * If called from iothread context, wake the target cpu in
1563 8edac960 aliguori
     * case it's halted.
1564 8edac960 aliguori
     */
1565 8edac960 aliguori
    if (!qemu_cpu_self(env)) {
1566 8edac960 aliguori
        qemu_cpu_kick(env);
1567 8edac960 aliguori
        return;
1568 8edac960 aliguori
    }
1569 8edac960 aliguori
#endif
1570 8edac960 aliguori
1571 2e70f6ef pbrook
    if (use_icount) {
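        /* Forcing the high half of the 32-bit icount decrementer to 0xffff
           makes it go negative, so the translated code exits to the main loop
           at its next check instead of waiting for the count to run out. */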
1572 266910c4 pbrook
        env->icount_decr.u16.high = 0xffff;
1573 2e70f6ef pbrook
#ifndef CONFIG_USER_ONLY
1574 2e70f6ef pbrook
        if (!can_do_io(env)
1575 be214e6c aurel32
            && (mask & ~old_mask) != 0) {
1576 2e70f6ef pbrook
            cpu_abort(env, "Raised interrupt while not in I/O function");
1577 2e70f6ef pbrook
        }
1578 2e70f6ef pbrook
#endif
1579 2e70f6ef pbrook
    } else {
1580 3098dba0 aurel32
        cpu_unlink_tb(env);
1581 ea041c0e bellard
    }
1582 ea041c0e bellard
}
1583 ea041c0e bellard
1584 b54ad049 bellard
void cpu_reset_interrupt(CPUState *env, int mask)
1585 b54ad049 bellard
{
1586 b54ad049 bellard
    env->interrupt_request &= ~mask;
1587 b54ad049 bellard
}
1588 b54ad049 bellard
1589 3098dba0 aurel32
void cpu_exit(CPUState *env)
1590 3098dba0 aurel32
{
1591 3098dba0 aurel32
    env->exit_request = 1;
1592 3098dba0 aurel32
    cpu_unlink_tb(env);
1593 3098dba0 aurel32
}
1594 3098dba0 aurel32
1595 c7cd6a37 blueswir1
const CPULogItem cpu_log_items[] = {
1596 5fafdf24 ths
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1597 f193c797 bellard
      "show generated host assembly code for each compiled TB" },
1598 f193c797 bellard
    { CPU_LOG_TB_IN_ASM, "in_asm",
1599 f193c797 bellard
      "show target assembly code for each compiled TB" },
1600 5fafdf24 ths
    { CPU_LOG_TB_OP, "op",
1601 57fec1fe bellard
      "show micro ops for each compiled TB" },
1602 f193c797 bellard
    { CPU_LOG_TB_OP_OPT, "op_opt",
1603 e01a1157 blueswir1
      "show micro ops "
1604 e01a1157 blueswir1
#ifdef TARGET_I386
1605 e01a1157 blueswir1
      "before eflags optimization and "
1606 f193c797 bellard
#endif
1607 e01a1157 blueswir1
      "after liveness analysis" },
1608 f193c797 bellard
    { CPU_LOG_INT, "int",
1609 f193c797 bellard
      "show interrupts/exceptions in short format" },
1610 f193c797 bellard
    { CPU_LOG_EXEC, "exec",
1611 f193c797 bellard
      "show trace before each executed TB (lots of logs)" },
1612 9fddaa0c bellard
    { CPU_LOG_TB_CPU, "cpu",
1613 e91c8a77 ths
      "show CPU state before block translation" },
1614 f193c797 bellard
#ifdef TARGET_I386
1615 f193c797 bellard
    { CPU_LOG_PCALL, "pcall",
1616 f193c797 bellard
      "show protected mode far calls/returns/exceptions" },
1617 eca1bdf4 aliguori
    { CPU_LOG_RESET, "cpu_reset",
1618 eca1bdf4 aliguori
      "show CPU state before CPU resets" },
1619 f193c797 bellard
#endif
1620 8e3a9fd2 bellard
#ifdef DEBUG_IOPORT
1621 fd872598 bellard
    { CPU_LOG_IOPORT, "ioport",
1622 fd872598 bellard
      "show all i/o ports accesses" },
1623 8e3a9fd2 bellard
#endif
1624 f193c797 bellard
    { 0, NULL, NULL },
1625 f193c797 bellard
};
1626 f193c797 bellard
1627 f6f3fbca Michael S. Tsirkin
#ifndef CONFIG_USER_ONLY
1628 f6f3fbca Michael S. Tsirkin
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1629 f6f3fbca Michael S. Tsirkin
    = QLIST_HEAD_INITIALIZER(memory_client_list);
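/* Clients register here to be notified of changes to the guest physical
   memory map, of dirty-bitmap syncs and of migration-log toggles; see the
   cpu_notify_*() helpers below. */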
1630 f6f3fbca Michael S. Tsirkin
1631 f6f3fbca Michael S. Tsirkin
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1632 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t size,
1633 f6f3fbca Michael S. Tsirkin
                                  ram_addr_t phys_offset)
1634 f6f3fbca Michael S. Tsirkin
{
1635 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1636 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1637 f6f3fbca Michael S. Tsirkin
        client->set_memory(client, start_addr, size, phys_offset);
1638 f6f3fbca Michael S. Tsirkin
    }
1639 f6f3fbca Michael S. Tsirkin
}
1640 f6f3fbca Michael S. Tsirkin
1641 f6f3fbca Michael S. Tsirkin
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1642 f6f3fbca Michael S. Tsirkin
                                        target_phys_addr_t end)
1643 f6f3fbca Michael S. Tsirkin
{
1644 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1645 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1646 f6f3fbca Michael S. Tsirkin
        int r = client->sync_dirty_bitmap(client, start, end);
1647 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1648 f6f3fbca Michael S. Tsirkin
            return r;
1649 f6f3fbca Michael S. Tsirkin
    }
1650 f6f3fbca Michael S. Tsirkin
    return 0;
1651 f6f3fbca Michael S. Tsirkin
}
1652 f6f3fbca Michael S. Tsirkin
1653 f6f3fbca Michael S. Tsirkin
static int cpu_notify_migration_log(int enable)
1654 f6f3fbca Michael S. Tsirkin
{
1655 f6f3fbca Michael S. Tsirkin
    CPUPhysMemoryClient *client;
1656 f6f3fbca Michael S. Tsirkin
    QLIST_FOREACH(client, &memory_client_list, list) {
1657 f6f3fbca Michael S. Tsirkin
        int r = client->migration_log(client, enable);
1658 f6f3fbca Michael S. Tsirkin
        if (r < 0)
1659 f6f3fbca Michael S. Tsirkin
            return r;
1660 f6f3fbca Michael S. Tsirkin
    }
1661 f6f3fbca Michael S. Tsirkin
    return 0;
1662 f6f3fbca Michael S. Tsirkin
}
1663 f6f3fbca Michael S. Tsirkin
1664 f6f3fbca Michael S. Tsirkin
static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
1665 f6f3fbca Michael S. Tsirkin
                                         CPUPhysMemoryClient *client)
1666 f6f3fbca Michael S. Tsirkin
{
1667 f6f3fbca Michael S. Tsirkin
    PhysPageDesc *pd;
1668 f6f3fbca Michael S. Tsirkin
    int l1, l2;
1669 f6f3fbca Michael S. Tsirkin
1670 f6f3fbca Michael S. Tsirkin
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
1671 f6f3fbca Michael S. Tsirkin
        pd = phys_map[l1];
1672 f6f3fbca Michael S. Tsirkin
        if (!pd) {
1673 f6f3fbca Michael S. Tsirkin
            continue;
1674 f6f3fbca Michael S. Tsirkin
        }
1675 f6f3fbca Michael S. Tsirkin
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
1676 f6f3fbca Michael S. Tsirkin
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
1677 f6f3fbca Michael S. Tsirkin
                continue;
1678 f6f3fbca Michael S. Tsirkin
            }
1679 f6f3fbca Michael S. Tsirkin
            client->set_memory(client, pd[l2].region_offset,
1680 f6f3fbca Michael S. Tsirkin
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
1681 f6f3fbca Michael S. Tsirkin
        }
1682 f6f3fbca Michael S. Tsirkin
    }
1683 f6f3fbca Michael S. Tsirkin
}
1684 f6f3fbca Michael S. Tsirkin
1685 f6f3fbca Michael S. Tsirkin
static void phys_page_for_each(CPUPhysMemoryClient *client)
1686 f6f3fbca Michael S. Tsirkin
{
1687 f6f3fbca Michael S. Tsirkin
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
1688 f6f3fbca Michael S. Tsirkin
1689 f6f3fbca Michael S. Tsirkin
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1690 f6f3fbca Michael S. Tsirkin
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1691 f6f3fbca Michael S. Tsirkin
#endif
1692 f6f3fbca Michael S. Tsirkin
    void **phys_map = (void **)l1_phys_map;
1693 f6f3fbca Michael S. Tsirkin
    int l1;
1694 f6f3fbca Michael S. Tsirkin
    if (!l1_phys_map) {
1695 f6f3fbca Michael S. Tsirkin
        return;
1696 f6f3fbca Michael S. Tsirkin
    }
1697 f6f3fbca Michael S. Tsirkin
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
1698 f6f3fbca Michael S. Tsirkin
        if (phys_map[l1]) {
1699 f6f3fbca Michael S. Tsirkin
            phys_page_for_each_in_l1_map(phys_map[l1], client);
1700 f6f3fbca Michael S. Tsirkin
        }
1701 f6f3fbca Michael S. Tsirkin
    }
1702 f6f3fbca Michael S. Tsirkin
#else
1703 f6f3fbca Michael S. Tsirkin
    if (!l1_phys_map) {
1704 f6f3fbca Michael S. Tsirkin
        return;
1705 f6f3fbca Michael S. Tsirkin
    }
1706 f6f3fbca Michael S. Tsirkin
    phys_page_for_each_in_l1_map(l1_phys_map, client);
1707 f6f3fbca Michael S. Tsirkin
#endif
1708 f6f3fbca Michael S. Tsirkin
}
1709 f6f3fbca Michael S. Tsirkin
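/* A newly registered client is immediately replayed the current physical page
   mappings via phys_page_for_each(), so it needs no special start-up pass of
   its own. */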
1710 f6f3fbca Michael S. Tsirkin
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1711 f6f3fbca Michael S. Tsirkin
{
1712 f6f3fbca Michael S. Tsirkin
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
1713 f6f3fbca Michael S. Tsirkin
    phys_page_for_each(client);
1714 f6f3fbca Michael S. Tsirkin
}
1715 f6f3fbca Michael S. Tsirkin
1716 f6f3fbca Michael S. Tsirkin
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1717 f6f3fbca Michael S. Tsirkin
{
1718 f6f3fbca Michael S. Tsirkin
    QLIST_REMOVE(client, list);
1719 f6f3fbca Michael S. Tsirkin
}
1720 f6f3fbca Michael S. Tsirkin
#endif
1721 f6f3fbca Michael S. Tsirkin
1722 f193c797 bellard
static int cmp1(const char *s1, int n, const char *s2)
1723 f193c797 bellard
{
1724 f193c797 bellard
    if (strlen(s2) != n)
1725 f193c797 bellard
        return 0;
1726 f193c797 bellard
    return memcmp(s1, s2, n) == 0;
1727 f193c797 bellard
}
1728 3b46e624 ths
1729 f193c797 bellard
/* takes a comma-separated list of log masks. Returns 0 on error. */
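/* e.g. "in_asm,cpu" yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, while "all"
   selects every entry of cpu_log_items[]. */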
1730 f193c797 bellard
int cpu_str_to_log_mask(const char *str)
1731 f193c797 bellard
{
1732 c7cd6a37 blueswir1
    const CPULogItem *item;
1733 f193c797 bellard
    int mask;
1734 f193c797 bellard
    const char *p, *p1;
1735 f193c797 bellard
1736 f193c797 bellard
    p = str;
1737 f193c797 bellard
    mask = 0;
1738 f193c797 bellard
    for(;;) {
1739 f193c797 bellard
        p1 = strchr(p, ',');
1740 f193c797 bellard
        if (!p1)
1741 f193c797 bellard
            p1 = p + strlen(p);
1742 8e3a9fd2 bellard
        if(cmp1(p,p1-p,"all")) {
1743 8e3a9fd2 bellard
                for(item = cpu_log_items; item->mask != 0; item++) {
1744 8e3a9fd2 bellard
                        mask |= item->mask;
1745 8e3a9fd2 bellard
                }
1746 8e3a9fd2 bellard
        } else {
1747 f193c797 bellard
        for(item = cpu_log_items; item->mask != 0; item++) {
1748 f193c797 bellard
            if (cmp1(p, p1 - p, item->name))
1749 f193c797 bellard
                goto found;
1750 f193c797 bellard
        }
1751 f193c797 bellard
        return 0;
1752 8e3a9fd2 bellard
        }
1753 f193c797 bellard
    found:
1754 f193c797 bellard
        mask |= item->mask;
1755 f193c797 bellard
        if (*p1 != ',')
1756 f193c797 bellard
            break;
1757 f193c797 bellard
        p = p1 + 1;
1758 f193c797 bellard
    }
1759 f193c797 bellard
    return mask;
1760 f193c797 bellard
}
1761 ea041c0e bellard
1762 7501267e bellard
void cpu_abort(CPUState *env, const char *fmt, ...)
1763 7501267e bellard
{
1764 7501267e bellard
    va_list ap;
1765 493ae1f0 pbrook
    va_list ap2;
1766 7501267e bellard
1767 7501267e bellard
    va_start(ap, fmt);
1768 493ae1f0 pbrook
    va_copy(ap2, ap);
1769 7501267e bellard
    fprintf(stderr, "qemu: fatal: ");
1770 7501267e bellard
    vfprintf(stderr, fmt, ap);
1771 7501267e bellard
    fprintf(stderr, "\n");
1772 7501267e bellard
#ifdef TARGET_I386
1773 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1774 7fe48483 bellard
#else
1775 7fe48483 bellard
    cpu_dump_state(env, stderr, fprintf, 0);
1776 7501267e bellard
#endif
1777 93fcfe39 aliguori
    if (qemu_log_enabled()) {
1778 93fcfe39 aliguori
        qemu_log("qemu: fatal: ");
1779 93fcfe39 aliguori
        qemu_log_vprintf(fmt, ap2);
1780 93fcfe39 aliguori
        qemu_log("\n");
1781 f9373291 j_mayer
#ifdef TARGET_I386
1782 93fcfe39 aliguori
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1783 f9373291 j_mayer
#else
1784 93fcfe39 aliguori
        log_cpu_state(env, 0);
1785 f9373291 j_mayer
#endif
1786 31b1a7b4 aliguori
        qemu_log_flush();
1787 93fcfe39 aliguori
        qemu_log_close();
1788 924edcae balrog
    }
1789 493ae1f0 pbrook
    va_end(ap2);
1790 f9373291 j_mayer
    va_end(ap);
1791 fd052bf6 Riku Voipio
#if defined(CONFIG_USER_ONLY)
1792 fd052bf6 Riku Voipio
    {
1793 fd052bf6 Riku Voipio
        struct sigaction act;
1794 fd052bf6 Riku Voipio
        sigfillset(&act.sa_mask);
1795 fd052bf6 Riku Voipio
        act.sa_handler = SIG_DFL;
1796 fd052bf6 Riku Voipio
        sigaction(SIGABRT, &act, NULL);
1797 fd052bf6 Riku Voipio
    }
1798 fd052bf6 Riku Voipio
#endif
1799 7501267e bellard
    abort();
1800 7501267e bellard
}
1801 7501267e bellard
1802 c5be9f08 ths
CPUState *cpu_copy(CPUState *env)
1803 c5be9f08 ths
{
1804 01ba9816 ths
    CPUState *new_env = cpu_init(env->cpu_model_str);
1805 c5be9f08 ths
    CPUState *next_cpu = new_env->next_cpu;
1806 c5be9f08 ths
    int cpu_index = new_env->cpu_index;
1807 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1808 5a38f081 aliguori
    CPUBreakpoint *bp;
1809 5a38f081 aliguori
    CPUWatchpoint *wp;
1810 5a38f081 aliguori
#endif
1811 5a38f081 aliguori
1812 c5be9f08 ths
    memcpy(new_env, env, sizeof(CPUState));
1813 5a38f081 aliguori
1814 5a38f081 aliguori
    /* Preserve chaining and index. */
1815 c5be9f08 ths
    new_env->next_cpu = next_cpu;
1816 c5be9f08 ths
    new_env->cpu_index = cpu_index;
1817 5a38f081 aliguori
1818 5a38f081 aliguori
    /* Clone all break/watchpoints.
1819 5a38f081 aliguori
       Note: Once we support ptrace with hw-debug register access, make sure
1820 5a38f081 aliguori
       BP_CPU break/watchpoints are handled correctly on clone. */
1821 72cf2d4f Blue Swirl
    QTAILQ_INIT(&new_env->breakpoints);
1822 72cf2d4f Blue Swirl
    QTAILQ_INIT(&new_env->watchpoints);
1823 5a38f081 aliguori
#if defined(TARGET_HAS_ICE)
1824 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1825 5a38f081 aliguori
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1826 5a38f081 aliguori
    }
1827 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1828 5a38f081 aliguori
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1829 5a38f081 aliguori
                              wp->flags, NULL);
1830 5a38f081 aliguori
    }
1831 5a38f081 aliguori
#endif
1832 5a38f081 aliguori
1833 c5be9f08 ths
    return new_env;
1834 c5be9f08 ths
}
1835 c5be9f08 ths
1836 0124311e bellard
#if !defined(CONFIG_USER_ONLY)
1837 0124311e bellard
1838 5c751e99 edgar_igl
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1839 5c751e99 edgar_igl
{
1840 5c751e99 edgar_igl
    unsigned int i;
1841 5c751e99 edgar_igl
1842 5c751e99 edgar_igl
    /* Discard jump cache entries for any tb which might potentially
1843 5c751e99 edgar_igl
       overlap the flushed page.  */
1844 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1845 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1846 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1847 5c751e99 edgar_igl
1848 5c751e99 edgar_igl
    i = tb_jmp_cache_hash_page(addr);
1849 5c751e99 edgar_igl
    memset (&env->tb_jmp_cache[i], 0, 
1850 5c751e99 edgar_igl
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1851 5c751e99 edgar_igl
}
1852 5c751e99 edgar_igl
1853 08738984 Igor Kovalenko
static CPUTLBEntry s_cputlb_empty_entry = {
1854 08738984 Igor Kovalenko
    .addr_read  = -1,
1855 08738984 Igor Kovalenko
    .addr_write = -1,
1856 08738984 Igor Kovalenko
    .addr_code  = -1,
1857 08738984 Igor Kovalenko
    .addend     = -1,
1858 08738984 Igor Kovalenko
};
1859 08738984 Igor Kovalenko
1860 ee8b7021 bellard
/* NOTE: if flush_global is true, also flush global entries (not
1861 ee8b7021 bellard
   implemented yet) */
1862 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
1863 33417e70 bellard
{
1864 33417e70 bellard
    int i;
1865 0124311e bellard
1866 9fa3e853 bellard
#if defined(DEBUG_TLB)
1867 9fa3e853 bellard
    printf("tlb_flush:\n");
1868 9fa3e853 bellard
#endif
1869 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1870 0124311e bellard
       links while we are modifying them */
1871 0124311e bellard
    env->current_tb = NULL;
1872 0124311e bellard
1873 33417e70 bellard
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1874 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1875 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1876 08738984 Igor Kovalenko
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1877 cfde4bd9 Isaku Yamahata
        }
1878 33417e70 bellard
    }
1879 9fa3e853 bellard
1880 8a40a180 bellard
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1881 9fa3e853 bellard
1882 e3db7226 bellard
    tlb_flush_count++;
1883 33417e70 bellard
}
1884 33417e70 bellard
1885 274da6b2 bellard
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1886 61382a50 bellard
{
1887 5fafdf24 ths
    if (addr == (tlb_entry->addr_read &
1888 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1889 5fafdf24 ths
        addr == (tlb_entry->addr_write &
1890 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1891 5fafdf24 ths
        addr == (tlb_entry->addr_code &
1892 84b7b8e7 bellard
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1893 08738984 Igor Kovalenko
        *tlb_entry = s_cputlb_empty_entry;
1894 84b7b8e7 bellard
    }
1895 61382a50 bellard
}
1896 61382a50 bellard
1897 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
1898 33417e70 bellard
{
1899 8a40a180 bellard
    int i;
1900 cfde4bd9 Isaku Yamahata
    int mmu_idx;
1901 0124311e bellard
1902 9fa3e853 bellard
#if defined(DEBUG_TLB)
1903 108c49b8 bellard
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1904 9fa3e853 bellard
#endif
1905 0124311e bellard
    /* must reset current TB so that interrupts cannot modify the
1906 0124311e bellard
       links while we are modifying them */
1907 0124311e bellard
    env->current_tb = NULL;
1908 61382a50 bellard
1909 61382a50 bellard
    addr &= TARGET_PAGE_MASK;
1910 61382a50 bellard
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1911 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1912 cfde4bd9 Isaku Yamahata
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1913 0124311e bellard
1914 5c751e99 edgar_igl
    tlb_flush_jmp_cache(env, addr);
1915 9fa3e853 bellard
}
1916 9fa3e853 bellard
1917 9fa3e853 bellard
/* update the TLBs so that writes to code in the RAM page 'ram_addr'
1918 9fa3e853 bellard
   can be detected */
1919 c227f099 Anthony Liguori
static void tlb_protect_code(ram_addr_t ram_addr)
1920 9fa3e853 bellard
{
1921 5fafdf24 ths
    cpu_physical_memory_reset_dirty(ram_addr,
1922 6a00d601 bellard
                                    ram_addr + TARGET_PAGE_SIZE,
1923 6a00d601 bellard
                                    CODE_DIRTY_FLAG);
1924 9fa3e853 bellard
}
1925 9fa3e853 bellard
1926 9fa3e853 bellard
/* update the TLB so that writes in physical page 'ram_addr' are no longer
1927 3a7d929e bellard
   tested for self-modifying code */
1928 c227f099 Anthony Liguori
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1929 3a7d929e bellard
                                    target_ulong vaddr)
1930 9fa3e853 bellard
{
1931 3a7d929e bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1932 1ccde1cb bellard
}
1933 1ccde1cb bellard
1934 5fafdf24 ths
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1935 1ccde1cb bellard
                                         unsigned long start, unsigned long length)
1936 1ccde1cb bellard
{
1937 1ccde1cb bellard
    unsigned long addr;
1938 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1939 84b7b8e7 bellard
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1940 1ccde1cb bellard
        if ((addr - start) < length) {
1941 0f459d16 pbrook
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1942 1ccde1cb bellard
        }
1943 1ccde1cb bellard
    }
1944 1ccde1cb bellard
}
1945 1ccde1cb bellard
1946 5579c7f3 pbrook
/* Note: start and end must be within the same ram block.  */
1947 c227f099 Anthony Liguori
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1948 0a962c02 bellard
                                     int dirty_flags)
1949 1ccde1cb bellard
{
1950 1ccde1cb bellard
    CPUState *env;
1951 4f2ac237 bellard
    unsigned long length, start1;
1952 0a962c02 bellard
    int i, mask, len;
1953 0a962c02 bellard
    uint8_t *p;
1954 1ccde1cb bellard
1955 1ccde1cb bellard
    start &= TARGET_PAGE_MASK;
1956 1ccde1cb bellard
    end = TARGET_PAGE_ALIGN(end);
1957 1ccde1cb bellard
1958 1ccde1cb bellard
    length = end - start;
1959 1ccde1cb bellard
    if (length == 0)
1960 1ccde1cb bellard
        return;
1961 0a962c02 bellard
    len = length >> TARGET_PAGE_BITS;
1962 f23db169 bellard
    mask = ~dirty_flags;
1963 f23db169 bellard
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
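    /* phys_ram_dirty keeps one byte of dirty flags per target page; clear the
       requested flag bits for every page in [start, end). */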
1964 f23db169 bellard
    for(i = 0; i < len; i++)
1965 f23db169 bellard
        p[i] &= mask;
1966 f23db169 bellard
1967 1ccde1cb bellard
    /* we modify the TLB cache so that the dirty bit will be set again
1968 1ccde1cb bellard
       when accessing the range */
1969 5579c7f3 pbrook
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1970 5579c7f3 pbrook
    /* Check that we don't span multiple blocks - this breaks the
1971 5579c7f3 pbrook
       address comparisons below.  */
1972 5579c7f3 pbrook
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1973 5579c7f3 pbrook
            != (end - 1) - start) {
1974 5579c7f3 pbrook
        abort();
1975 5579c7f3 pbrook
    }
1976 5579c7f3 pbrook
1977 6a00d601 bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1978 cfde4bd9 Isaku Yamahata
        int mmu_idx;
1979 cfde4bd9 Isaku Yamahata
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1980 cfde4bd9 Isaku Yamahata
            for(i = 0; i < CPU_TLB_SIZE; i++)
1981 cfde4bd9 Isaku Yamahata
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1982 cfde4bd9 Isaku Yamahata
                                      start1, length);
1983 cfde4bd9 Isaku Yamahata
        }
1984 6a00d601 bellard
    }
1985 1ccde1cb bellard
}
1986 1ccde1cb bellard
1987 74576198 aliguori
int cpu_physical_memory_set_dirty_tracking(int enable)
1988 74576198 aliguori
{
1989 f6f3fbca Michael S. Tsirkin
    int ret = 0;
1990 74576198 aliguori
    in_migration = enable;
1991 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_migration_log(!!enable);
1992 f6f3fbca Michael S. Tsirkin
    return ret;
1993 74576198 aliguori
}
1994 74576198 aliguori
1995 74576198 aliguori
int cpu_physical_memory_get_dirty_tracking(void)
1996 74576198 aliguori
{
1997 74576198 aliguori
    return in_migration;
1998 74576198 aliguori
}
1999 74576198 aliguori
2000 c227f099 Anthony Liguori
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2001 c227f099 Anthony Liguori
                                   target_phys_addr_t end_addr)
2002 2bec46dc aliguori
{
2003 7b8f3b78 Michael S. Tsirkin
    int ret;
2004 151f7749 Jan Kiszka
2005 f6f3fbca Michael S. Tsirkin
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2006 151f7749 Jan Kiszka
    return ret;
2007 2bec46dc aliguori
}
2008 2bec46dc aliguori
2009 3a7d929e bellard
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2010 3a7d929e bellard
{
2011 c227f099 Anthony Liguori
    ram_addr_t ram_addr;
2012 5579c7f3 pbrook
    void *p;
2013 3a7d929e bellard
2014 84b7b8e7 bellard
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2015 5579c7f3 pbrook
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2016 5579c7f3 pbrook
            + tlb_entry->addend);
2017 5579c7f3 pbrook
        ram_addr = qemu_ram_addr_from_host(p);
2018 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
2019 0f459d16 pbrook
            tlb_entry->addr_write |= TLB_NOTDIRTY;
2020 3a7d929e bellard
        }
2021 3a7d929e bellard
    }
2022 3a7d929e bellard
}
2023 3a7d929e bellard
2024 3a7d929e bellard
/* update the TLB according to the current state of the dirty bits */
2025 3a7d929e bellard
void cpu_tlb_update_dirty(CPUState *env)
2026 3a7d929e bellard
{
2027 3a7d929e bellard
    int i;
2028 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2029 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2030 cfde4bd9 Isaku Yamahata
        for(i = 0; i < CPU_TLB_SIZE; i++)
2031 cfde4bd9 Isaku Yamahata
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2032 cfde4bd9 Isaku Yamahata
    }
2033 3a7d929e bellard
}
2034 3a7d929e bellard
2035 0f459d16 pbrook
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2036 1ccde1cb bellard
{
2037 0f459d16 pbrook
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2038 0f459d16 pbrook
        tlb_entry->addr_write = vaddr;
2039 1ccde1cb bellard
}
2040 1ccde1cb bellard
2041 0f459d16 pbrook
/* update the TLB corresponding to virtual page vaddr
2042 0f459d16 pbrook
   so that it is no longer dirty */
2043 0f459d16 pbrook
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2044 1ccde1cb bellard
{
2045 1ccde1cb bellard
    int i;
2046 cfde4bd9 Isaku Yamahata
    int mmu_idx;
2047 1ccde1cb bellard
2048 0f459d16 pbrook
    vaddr &= TARGET_PAGE_MASK;
2049 1ccde1cb bellard
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2050 cfde4bd9 Isaku Yamahata
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2051 cfde4bd9 Isaku Yamahata
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2052 9fa3e853 bellard
}
2053 9fa3e853 bellard
2054 59817ccb bellard
/* add a new TLB entry. At most one entry for a given virtual address
2055 59817ccb bellard
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2056 59817ccb bellard
   (can only happen in non SOFTMMU mode for I/O pages or pages
2057 59817ccb bellard
   conflicting with the host address space). */
2058 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2059 c227f099 Anthony Liguori
                      target_phys_addr_t paddr, int prot,
2060 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2061 9fa3e853 bellard
{
2062 92e873b9 bellard
    PhysPageDesc *p;
2063 4f2ac237 bellard
    unsigned long pd;
2064 9fa3e853 bellard
    unsigned int index;
2065 4f2ac237 bellard
    target_ulong address;
2066 0f459d16 pbrook
    target_ulong code_address;
2067 c227f099 Anthony Liguori
    target_phys_addr_t addend;
2068 9fa3e853 bellard
    int ret;
2069 84b7b8e7 bellard
    CPUTLBEntry *te;
2070 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2071 c227f099 Anthony Liguori
    target_phys_addr_t iotlb;
2072 9fa3e853 bellard
2073 92e873b9 bellard
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2074 9fa3e853 bellard
    if (!p) {
2075 9fa3e853 bellard
        pd = IO_MEM_UNASSIGNED;
2076 9fa3e853 bellard
    } else {
2077 9fa3e853 bellard
        pd = p->phys_offset;
2078 9fa3e853 bellard
    }
2079 9fa3e853 bellard
#if defined(DEBUG_TLB)
2080 6ebbf390 j_mayer
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2081 6ebbf390 j_mayer
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2082 9fa3e853 bellard
#endif
2083 9fa3e853 bellard
2084 9fa3e853 bellard
    ret = 0;
2085 0f459d16 pbrook
    address = vaddr;
2086 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2087 0f459d16 pbrook
        /* IO memory case (romd handled later) */
2088 0f459d16 pbrook
        address |= TLB_MMIO;
2089 0f459d16 pbrook
    }
2090 5579c7f3 pbrook
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2091 0f459d16 pbrook
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2092 0f459d16 pbrook
        /* Normal RAM.  */
2093 0f459d16 pbrook
        iotlb = pd & TARGET_PAGE_MASK;
2094 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2095 0f459d16 pbrook
            iotlb |= IO_MEM_NOTDIRTY;
2096 0f459d16 pbrook
        else
2097 0f459d16 pbrook
            iotlb |= IO_MEM_ROM;
2098 0f459d16 pbrook
    } else {
2099 ccbb4d44 Stuart Brady
        /* IO handlers are currently passed a physical address.
2100 0f459d16 pbrook
           It would be nice to pass an offset from the base address
2101 0f459d16 pbrook
           of that region.  This would avoid having to special case RAM,
2102 0f459d16 pbrook
           and avoid full address decoding in every device.
2103 0f459d16 pbrook
           We can't use the high bits of pd for this because
2104 0f459d16 pbrook
           IO_MEM_ROMD uses these as a ram address.  */
2105 8da3ff18 pbrook
        iotlb = (pd & ~TARGET_PAGE_MASK);
2106 8da3ff18 pbrook
        if (p) {
2107 8da3ff18 pbrook
            iotlb += p->region_offset;
2108 8da3ff18 pbrook
        } else {
2109 8da3ff18 pbrook
            iotlb += paddr;
2110 8da3ff18 pbrook
        }
2111 0f459d16 pbrook
    }
2112 0f459d16 pbrook
2113 0f459d16 pbrook
    code_address = address;
2114 0f459d16 pbrook
    /* Make accesses to pages with watchpoints go via the
2115 0f459d16 pbrook
       watchpoint trap routines.  */
2116 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2117 a1d1bb31 aliguori
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2118 0f459d16 pbrook
            iotlb = io_mem_watch + paddr;
2119 0f459d16 pbrook
            /* TODO: The memory case can be optimized by not trapping
2120 0f459d16 pbrook
               reads of pages with a write breakpoint.  */
2121 0f459d16 pbrook
            address |= TLB_MMIO;
2122 6658ffb8 pbrook
        }
2123 0f459d16 pbrook
    }
2124 d79acba4 balrog
2125 0f459d16 pbrook
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2126 0f459d16 pbrook
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2127 0f459d16 pbrook
    te = &env->tlb_table[mmu_idx][index];
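    /* The addend is biased by -vaddr so that, for RAM-backed pages, the fast
       path can compute the host address as guest vaddr + addend. */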
2128 0f459d16 pbrook
    te->addend = addend - vaddr;
2129 0f459d16 pbrook
    if (prot & PAGE_READ) {
2130 0f459d16 pbrook
        te->addr_read = address;
2131 0f459d16 pbrook
    } else {
2132 0f459d16 pbrook
        te->addr_read = -1;
2133 0f459d16 pbrook
    }
2134 5c751e99 edgar_igl
2135 0f459d16 pbrook
    if (prot & PAGE_EXEC) {
2136 0f459d16 pbrook
        te->addr_code = code_address;
2137 0f459d16 pbrook
    } else {
2138 0f459d16 pbrook
        te->addr_code = -1;
2139 0f459d16 pbrook
    }
2140 0f459d16 pbrook
    if (prot & PAGE_WRITE) {
2141 0f459d16 pbrook
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2142 0f459d16 pbrook
            (pd & IO_MEM_ROMD)) {
2143 0f459d16 pbrook
            /* Write access calls the I/O callback.  */
2144 0f459d16 pbrook
            te->addr_write = address | TLB_MMIO;
2145 0f459d16 pbrook
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2146 0f459d16 pbrook
                   !cpu_physical_memory_is_dirty(pd)) {
2147 0f459d16 pbrook
            te->addr_write = address | TLB_NOTDIRTY;
2148 9fa3e853 bellard
        } else {
2149 0f459d16 pbrook
            te->addr_write = address;
2150 9fa3e853 bellard
        }
2151 0f459d16 pbrook
    } else {
2152 0f459d16 pbrook
        te->addr_write = -1;
2153 9fa3e853 bellard
    }
2154 9fa3e853 bellard
    return ret;
2155 9fa3e853 bellard
}
2156 9fa3e853 bellard
2157 0124311e bellard
#else
2158 0124311e bellard
2159 ee8b7021 bellard
void tlb_flush(CPUState *env, int flush_global)
2160 0124311e bellard
{
2161 0124311e bellard
}
2162 0124311e bellard
2163 2e12669a bellard
void tlb_flush_page(CPUState *env, target_ulong addr)
2164 0124311e bellard
{
2165 0124311e bellard
}
2166 0124311e bellard
2167 5fafdf24 ths
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2168 c227f099 Anthony Liguori
                      target_phys_addr_t paddr, int prot,
2169 6ebbf390 j_mayer
                      int mmu_idx, int is_softmmu)
2170 9fa3e853 bellard
{
2171 9fa3e853 bellard
    return 0;
2172 9fa3e853 bellard
}
2173 0124311e bellard
2174 edf8e2af Mika Westerberg
/*
2175 edf8e2af Mika Westerberg
 * Walks guest process memory "regions" one by one
2176 edf8e2af Mika Westerberg
 * and calls callback function 'fn' for each region.
2177 edf8e2af Mika Westerberg
 */
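/* page_dump() below is a typical caller: it passes dump_region() as 'fn' to
   print one line per region. */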
2178 edf8e2af Mika Westerberg
int walk_memory_regions(void *priv,
2179 edf8e2af Mika Westerberg
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2180 33417e70 bellard
{
2181 9fa3e853 bellard
    unsigned long start, end;
2182 edf8e2af Mika Westerberg
    PageDesc *p = NULL;
2183 9fa3e853 bellard
    int i, j, prot, prot1;
2184 edf8e2af Mika Westerberg
    int rc = 0;
2185 33417e70 bellard
2186 edf8e2af Mika Westerberg
    start = end = -1;
2187 9fa3e853 bellard
    prot = 0;
2188 edf8e2af Mika Westerberg
2189 edf8e2af Mika Westerberg
    for (i = 0; i <= L1_SIZE; i++) {
2190 edf8e2af Mika Westerberg
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2191 edf8e2af Mika Westerberg
        for (j = 0; j < L2_SIZE; j++) {
2192 edf8e2af Mika Westerberg
            prot1 = (p == NULL) ? 0 : p[j].flags;
2193 edf8e2af Mika Westerberg
            /*
2194 edf8e2af Mika Westerberg
             * "region" is one continuous chunk of memory
2195 edf8e2af Mika Westerberg
             * that has the same protection flags set.
2196 edf8e2af Mika Westerberg
             */
2197 9fa3e853 bellard
            if (prot1 != prot) {
2198 9fa3e853 bellard
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2199 9fa3e853 bellard
                if (start != -1) {
2200 edf8e2af Mika Westerberg
                    rc = (*fn)(priv, start, end, prot);
2201 edf8e2af Mika Westerberg
                    /* callback can stop iteration by returning != 0 */
2202 edf8e2af Mika Westerberg
                    if (rc != 0)
2203 edf8e2af Mika Westerberg
                        return (rc);
2204 9fa3e853 bellard
                }
2205 9fa3e853 bellard
                if (prot1 != 0)
2206 9fa3e853 bellard
                    start = end;
2207 9fa3e853 bellard
                else
2208 9fa3e853 bellard
                    start = -1;
2209 9fa3e853 bellard
                prot = prot1;
2210 9fa3e853 bellard
            }
2211 edf8e2af Mika Westerberg
            if (p == NULL)
2212 9fa3e853 bellard
                break;
2213 9fa3e853 bellard
        }
2214 33417e70 bellard
    }
2215 edf8e2af Mika Westerberg
    return (rc);
2216 edf8e2af Mika Westerberg
}
2217 edf8e2af Mika Westerberg
2218 edf8e2af Mika Westerberg
static int dump_region(void *priv, unsigned long start,
2219 edf8e2af Mika Westerberg
    unsigned long end, unsigned long prot)
2220 edf8e2af Mika Westerberg
{
2221 edf8e2af Mika Westerberg
    FILE *f = (FILE *)priv;
2222 edf8e2af Mika Westerberg
2223 edf8e2af Mika Westerberg
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2224 edf8e2af Mika Westerberg
        start, end, end - start,
2225 edf8e2af Mika Westerberg
        ((prot & PAGE_READ) ? 'r' : '-'),
2226 edf8e2af Mika Westerberg
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2227 edf8e2af Mika Westerberg
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2228 edf8e2af Mika Westerberg
2229 edf8e2af Mika Westerberg
    return (0);
2230 edf8e2af Mika Westerberg
}
2231 edf8e2af Mika Westerberg
2232 edf8e2af Mika Westerberg
/* dump memory mappings */
2233 edf8e2af Mika Westerberg
void page_dump(FILE *f)
2234 edf8e2af Mika Westerberg
{
2235 edf8e2af Mika Westerberg
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2236 edf8e2af Mika Westerberg
            "start", "end", "size", "prot");
2237 edf8e2af Mika Westerberg
    walk_memory_regions(f, dump_region);
2238 33417e70 bellard
}
2239 33417e70 bellard
2240 53a5960a pbrook
int page_get_flags(target_ulong address)
2241 33417e70 bellard
{
2242 9fa3e853 bellard
    PageDesc *p;
2243 9fa3e853 bellard
2244 9fa3e853 bellard
    p = page_find(address >> TARGET_PAGE_BITS);
2245 33417e70 bellard
    if (!p)
2246 9fa3e853 bellard
        return 0;
2247 9fa3e853 bellard
    return p->flags;
2248 9fa3e853 bellard
}
2249 9fa3e853 bellard
2250 9fa3e853 bellard
/* modify the flags of a page and invalidate the code if
2251 ccbb4d44 Stuart Brady
   necessary. The flag PAGE_WRITE_ORG is set automatically
2252 9fa3e853 bellard
   depending on PAGE_WRITE */
2253 53a5960a pbrook
void page_set_flags(target_ulong start, target_ulong end, int flags)
2254 9fa3e853 bellard
{
2255 9fa3e853 bellard
    PageDesc *p;
2256 53a5960a pbrook
    target_ulong addr;
2257 9fa3e853 bellard
2258 c8a706fe pbrook
    /* mmap_lock should already be held.  */
2259 9fa3e853 bellard
    start = start & TARGET_PAGE_MASK;
2260 9fa3e853 bellard
    end = TARGET_PAGE_ALIGN(end);
2261 9fa3e853 bellard
    if (flags & PAGE_WRITE)
2262 9fa3e853 bellard
        flags |= PAGE_WRITE_ORG;
2263 9fa3e853 bellard
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2264 9fa3e853 bellard
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2265 17e2377a pbrook
        /* We may be called for host regions that are outside guest
2266 17e2377a pbrook
           address space.  */
2267 17e2377a pbrook
        if (!p)
2268 17e2377a pbrook
            return;
2269 9fa3e853 bellard
        /* if the write protection is set, then we invalidate the code
2270 9fa3e853 bellard
           inside */
2271 5fafdf24 ths
        if (!(p->flags & PAGE_WRITE) &&
2272 9fa3e853 bellard
            (flags & PAGE_WRITE) &&
2273 9fa3e853 bellard
            p->first_tb) {
2274 d720b93d bellard
            tb_invalidate_phys_page(addr, 0, NULL);
2275 9fa3e853 bellard
        }
2276 9fa3e853 bellard
        p->flags = flags;
2277 9fa3e853 bellard
    }
2278 33417e70 bellard
}
2279 33417e70 bellard
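/* Illustrative sketch, not part of exec.c: how a user-mode mmap emulation
   might record a new guest mapping with page_set_flags().  The function
   name and parameters below are hypothetical. */
static void example_note_guest_mapping(target_ulong guest_start,
                                       target_ulong guest_len, int writable)
{
    int flags = PAGE_VALID | PAGE_READ;

    if (writable)
        flags |= PAGE_WRITE;  /* PAGE_WRITE_ORG is then set automatically */
    /* mmap_lock should be held by the caller, as noted above. */
    page_set_flags(guest_start, guest_start + guest_len, flags);
}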
2280 3d97b40b ths
int page_check_range(target_ulong start, target_ulong len, int flags)
2281 3d97b40b ths
{
2282 3d97b40b ths
    PageDesc *p;
2283 3d97b40b ths
    target_ulong end;
2284 3d97b40b ths
    target_ulong addr;
2285 3d97b40b ths
2286 55f280c9 balrog
    if (start + len < start)
2287 55f280c9 balrog
        /* we've wrapped around */
2288 55f280c9 balrog
        return -1;
2289 55f280c9 balrog
2290 3d97b40b ths
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2291 3d97b40b ths
    start = start & TARGET_PAGE_MASK;
2292 3d97b40b ths
2293 3d97b40b ths
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2294 3d97b40b ths
        p = page_find(addr >> TARGET_PAGE_BITS);
2295 3d97b40b ths
        if (!p)
2296 3d97b40b ths
            return -1;
2297 3d97b40b ths
        if (!(p->flags & PAGE_VALID))
2298 3d97b40b ths
            return -1;
2299 3d97b40b ths
2300 dae3270c bellard
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2301 3d97b40b ths
            return -1;
2302 dae3270c bellard
        if (flags & PAGE_WRITE) {
2303 dae3270c bellard
            if (!(p->flags & PAGE_WRITE_ORG))
2304 dae3270c bellard
                return -1;
2305 dae3270c bellard
            /* unprotect the page if it was put read-only because it
2306 dae3270c bellard
               contains translated code */
2307 dae3270c bellard
            if (!(p->flags & PAGE_WRITE)) {
2308 dae3270c bellard
                if (!page_unprotect(addr, 0, NULL))
2309 dae3270c bellard
                    return -1;
2310 dae3270c bellard
            }
2311 dae3270c bellard
            return 0;
2312 dae3270c bellard
        }
2313 3d97b40b ths
    }
2314 3d97b40b ths
    return 0;
2315 3d97b40b ths
}
2316 3d97b40b ths
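/* Illustrative sketch, not part of exec.c: a syscall emulation path could
   validate a guest buffer with page_check_range() before writing to it.
   The function name and parameters are hypothetical. */
static int example_guest_buffer_writable(target_ulong guest_buf,
                                         target_ulong guest_len)
{
    /* page_check_range() returns 0 on success and -1 on failure; it also
       unprotects pages that were made read-only for translated code. */
    return page_check_range(guest_buf, guest_len, PAGE_WRITE) == 0;
}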
2317 9fa3e853 bellard
/* called from signal handler: invalidate the code and unprotect the
2318 ccbb4d44 Stuart Brady
   page. Return TRUE if the fault was successfully handled. */
2319 53a5960a pbrook
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2320 9fa3e853 bellard
{
2321 9fa3e853 bellard
    unsigned int page_index, prot, pindex;
2322 9fa3e853 bellard
    PageDesc *p, *p1;
2323 53a5960a pbrook
    target_ulong host_start, host_end, addr;
2324 9fa3e853 bellard
2325 c8a706fe pbrook
    /* Technically this isn't safe inside a signal handler.  However we
2326 c8a706fe pbrook
       know this only ever happens in a synchronous SEGV handler, so in
2327 c8a706fe pbrook
       practice it seems to be ok.  */
2328 c8a706fe pbrook
    mmap_lock();
2329 c8a706fe pbrook
2330 83fb7adf bellard
    host_start = address & qemu_host_page_mask;
2331 9fa3e853 bellard
    page_index = host_start >> TARGET_PAGE_BITS;
2332 9fa3e853 bellard
    p1 = page_find(page_index);
2333 c8a706fe pbrook
    if (!p1) {
2334 c8a706fe pbrook
        mmap_unlock();
2335 9fa3e853 bellard
        return 0;
2336 c8a706fe pbrook
    }
2337 83fb7adf bellard
    host_end = host_start + qemu_host_page_size;
2338 9fa3e853 bellard
    p = p1;
2339 9fa3e853 bellard
    prot = 0;
2340 9fa3e853 bellard
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2341 9fa3e853 bellard
        prot |= p->flags;
2342 9fa3e853 bellard
        p++;
2343 9fa3e853 bellard
    }
2344 9fa3e853 bellard
    /* if the page was really writable, then we change its
2345 9fa3e853 bellard
       protection back to writable */
2346 9fa3e853 bellard
    if (prot & PAGE_WRITE_ORG) {
2347 9fa3e853 bellard
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2348 9fa3e853 bellard
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2349 5fafdf24 ths
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2350 9fa3e853 bellard
                     (prot & PAGE_BITS) | PAGE_WRITE);
2351 9fa3e853 bellard
            p1[pindex].flags |= PAGE_WRITE;
2352 9fa3e853 bellard
            /* and since the content will be modified, we must invalidate
2353 9fa3e853 bellard
               the corresponding translated code. */
2354 d720b93d bellard
            tb_invalidate_phys_page(address, pc, puc);
2355 9fa3e853 bellard
#ifdef DEBUG_TB_CHECK
2356 9fa3e853 bellard
            tb_invalidate_check(address);
2357 9fa3e853 bellard
#endif
2358 c8a706fe pbrook
            mmap_unlock();
2359 9fa3e853 bellard
            return 1;
2360 9fa3e853 bellard
        }
2361 9fa3e853 bellard
    }
2362 c8a706fe pbrook
    mmap_unlock();
2363 9fa3e853 bellard
    return 0;
2364 9fa3e853 bellard
}
2365 9fa3e853 bellard
2366 6a00d601 bellard
static inline void tlb_set_dirty(CPUState *env,
2367 6a00d601 bellard
                                 unsigned long addr, target_ulong vaddr)
2368 1ccde1cb bellard
{
2369 1ccde1cb bellard
}
2370 9fa3e853 bellard
#endif /* defined(CONFIG_USER_ONLY) */
2371 9fa3e853 bellard
2372 e2eef170 pbrook
#if !defined(CONFIG_USER_ONLY)
2373 8da3ff18 pbrook
2374 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2375 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset);
2376 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2377 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2378 db7b5426 blueswir1
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2379 db7b5426 blueswir1
                      need_subpage)                                     \
2380 db7b5426 blueswir1
    do {                                                                \
2381 db7b5426 blueswir1
        if (addr > start_addr)                                          \
2382 db7b5426 blueswir1
            start_addr2 = 0;                                            \
2383 db7b5426 blueswir1
        else {                                                          \
2384 db7b5426 blueswir1
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2385 db7b5426 blueswir1
            if (start_addr2 > 0)                                        \
2386 db7b5426 blueswir1
                need_subpage = 1;                                       \
2387 db7b5426 blueswir1
        }                                                               \
2388 db7b5426 blueswir1
                                                                        \
2389 49e9fba2 blueswir1
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2390 db7b5426 blueswir1
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2391 db7b5426 blueswir1
        else {                                                          \
2392 db7b5426 blueswir1
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2393 db7b5426 blueswir1
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2394 db7b5426 blueswir1
                need_subpage = 1;                                       \
2395 db7b5426 blueswir1
        }                                                               \
2396 db7b5426 blueswir1
    } while (0)
2397 db7b5426 blueswir1
2398 8f2498f9 Michael S. Tsirkin
/* register physical memory.
2399 8f2498f9 Michael S. Tsirkin
   For RAM, 'size' must be a multiple of the target page size.
2400 8f2498f9 Michael S. Tsirkin
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2401 8da3ff18 pbrook
   io memory page.  The address used when calling the IO function is
2402 8da3ff18 pbrook
   the offset from the start of the region, plus region_offset.  Both
2403 ccbb4d44 Stuart Brady
   start_addr and region_offset are rounded down to a page boundary
2404 8da3ff18 pbrook
   before calculating this offset.  This should not be a problem unless
2405 8da3ff18 pbrook
   the low bits of start_addr and region_offset differ.  */
2406 c227f099 Anthony Liguori
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2407 c227f099 Anthony Liguori
                                         ram_addr_t size,
2408 c227f099 Anthony Liguori
                                         ram_addr_t phys_offset,
2409 c227f099 Anthony Liguori
                                         ram_addr_t region_offset)
2410 33417e70 bellard
{
2411 c227f099 Anthony Liguori
    target_phys_addr_t addr, end_addr;
2412 92e873b9 bellard
    PhysPageDesc *p;
2413 9d42037b bellard
    CPUState *env;
2414 c227f099 Anthony Liguori
    ram_addr_t orig_size = size;
2415 db7b5426 blueswir1
    void *subpage;
2416 33417e70 bellard
2417 f6f3fbca Michael S. Tsirkin
    cpu_notify_set_memory(start_addr, size, phys_offset);
2418 f6f3fbca Michael S. Tsirkin
2419 67c4d23c pbrook
    if (phys_offset == IO_MEM_UNASSIGNED) {
2420 67c4d23c pbrook
        region_offset = start_addr;
2421 67c4d23c pbrook
    }
2422 8da3ff18 pbrook
    region_offset &= TARGET_PAGE_MASK;
2423 5fd386f6 bellard
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2424 c227f099 Anthony Liguori
    end_addr = start_addr + (target_phys_addr_t)size;
2425 49e9fba2 blueswir1
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2426 db7b5426 blueswir1
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2427 db7b5426 blueswir1
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2428 c227f099 Anthony Liguori
            ram_addr_t orig_memory = p->phys_offset;
2429 c227f099 Anthony Liguori
            target_phys_addr_t start_addr2, end_addr2;
2430 db7b5426 blueswir1
            int need_subpage = 0;
2431 db7b5426 blueswir1
2432 db7b5426 blueswir1
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2433 db7b5426 blueswir1
                          need_subpage);
2434 4254fab8 blueswir1
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2435 db7b5426 blueswir1
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2436 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2437 8da3ff18 pbrook
                                           &p->phys_offset, orig_memory,
2438 8da3ff18 pbrook
                                           p->region_offset);
2439 db7b5426 blueswir1
                } else {
2440 db7b5426 blueswir1
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2441 db7b5426 blueswir1
                                            >> IO_MEM_SHIFT];
2442 db7b5426 blueswir1
                }
2443 8da3ff18 pbrook
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2444 8da3ff18 pbrook
                                 region_offset);
2445 8da3ff18 pbrook
                p->region_offset = 0;
2446 db7b5426 blueswir1
            } else {
2447 db7b5426 blueswir1
                p->phys_offset = phys_offset;
2448 db7b5426 blueswir1
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2449 db7b5426 blueswir1
                    (phys_offset & IO_MEM_ROMD))
2450 db7b5426 blueswir1
                    phys_offset += TARGET_PAGE_SIZE;
2451 db7b5426 blueswir1
            }
2452 db7b5426 blueswir1
        } else {
2453 db7b5426 blueswir1
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2454 db7b5426 blueswir1
            p->phys_offset = phys_offset;
2455 8da3ff18 pbrook
            p->region_offset = region_offset;
2456 db7b5426 blueswir1
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2457 8da3ff18 pbrook
                (phys_offset & IO_MEM_ROMD)) {
2458 db7b5426 blueswir1
                phys_offset += TARGET_PAGE_SIZE;
2459 0e8f0967 pbrook
            } else {
2460 c227f099 Anthony Liguori
                target_phys_addr_t start_addr2, end_addr2;
2461 db7b5426 blueswir1
                int need_subpage = 0;
2462 db7b5426 blueswir1
2463 db7b5426 blueswir1
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2464 db7b5426 blueswir1
                              end_addr2, need_subpage);
2465 db7b5426 blueswir1
2466 4254fab8 blueswir1
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2467 db7b5426 blueswir1
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2468 8da3ff18 pbrook
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2469 67c4d23c pbrook
                                           addr & TARGET_PAGE_MASK);
2470 db7b5426 blueswir1
                    subpage_register(subpage, start_addr2, end_addr2,
2471 8da3ff18 pbrook
                                     phys_offset, region_offset);
2472 8da3ff18 pbrook
                    p->region_offset = 0;
2473 db7b5426 blueswir1
                }
2474 db7b5426 blueswir1
            }
2475 db7b5426 blueswir1
        }
2476 8da3ff18 pbrook
        region_offset += TARGET_PAGE_SIZE;
2477 33417e70 bellard
    }
2478 3b46e624 ths
2479 9d42037b bellard
    /* since each CPU stores ram addresses in its TLB cache, we must
2480 9d42037b bellard
       reset the modified entries */
2481 9d42037b bellard
    /* XXX: slow ! */
2482 9d42037b bellard
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2483 9d42037b bellard
        tlb_flush(env, 1);
2484 9d42037b bellard
    }
2485 33417e70 bellard
}
2486 33417e70 bellard
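/* Illustrative sketch, not part of exec.c: mapping two pages of one MMIO
   region so the callbacks always see device-relative offsets, per the
   comment above cpu_register_physical_memory_offset().  'mmio_index' is
   assumed to come from cpu_register_io_memory(); the addresses are made up. */
static void example_map_mmio_windows(int mmio_index)
{
    /* First 4K window: callbacks see offsets 0x000..0xfff. */
    cpu_register_physical_memory_offset(0x10000000, 0x1000, mmio_index, 0);
    /* Second window elsewhere: region_offset shifts the offsets seen by
       the callbacks to 0x1000..0x1fff. */
    cpu_register_physical_memory_offset(0x20000000, 0x1000, mmio_index,
                                        0x1000);
}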
2487 ba863458 bellard
/* XXX: temporary until new memory mapping API */
2488 c227f099 Anthony Liguori
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2489 ba863458 bellard
{
2490 ba863458 bellard
    PhysPageDesc *p;
2491 ba863458 bellard
2492 ba863458 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2493 ba863458 bellard
    if (!p)
2494 ba863458 bellard
        return IO_MEM_UNASSIGNED;
2495 ba863458 bellard
    return p->phys_offset;
2496 ba863458 bellard
}
2497 ba863458 bellard
2498 c227f099 Anthony Liguori
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2499 f65ed4c1 aliguori
{
2500 f65ed4c1 aliguori
    if (kvm_enabled())
2501 f65ed4c1 aliguori
        kvm_coalesce_mmio_region(addr, size);
2502 f65ed4c1 aliguori
}
2503 f65ed4c1 aliguori
2504 c227f099 Anthony Liguori
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2505 f65ed4c1 aliguori
{
2506 f65ed4c1 aliguori
    if (kvm_enabled())
2507 f65ed4c1 aliguori
        kvm_uncoalesce_mmio_region(addr, size);
2508 f65ed4c1 aliguori
}
2509 f65ed4c1 aliguori
2510 62a2744c Sheng Yang
void qemu_flush_coalesced_mmio_buffer(void)
2511 62a2744c Sheng Yang
{
2512 62a2744c Sheng Yang
    if (kvm_enabled())
2513 62a2744c Sheng Yang
        kvm_flush_coalesced_mmio_buffer();
2514 62a2744c Sheng Yang
}
2515 62a2744c Sheng Yang
2516 c227f099 Anthony Liguori
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2517 94a6b54f pbrook
{
2518 94a6b54f pbrook
    RAMBlock *new_block;
2519 94a6b54f pbrook
2520 94a6b54f pbrook
    size = TARGET_PAGE_ALIGN(size);
2521 94a6b54f pbrook
    new_block = qemu_malloc(sizeof(*new_block));
2522 94a6b54f pbrook
2523 6b02494d Alexander Graf
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2524 6b02494d Alexander Graf
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2525 6b02494d Alexander Graf
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2526 6b02494d Alexander Graf
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2527 6b02494d Alexander Graf
#else
2528 94a6b54f pbrook
    new_block->host = qemu_vmalloc(size);
2529 6b02494d Alexander Graf
#endif
2530 ccb167e9 Izik Eidus
#ifdef MADV_MERGEABLE
2531 ccb167e9 Izik Eidus
    madvise(new_block->host, size, MADV_MERGEABLE);
2532 ccb167e9 Izik Eidus
#endif
2533 94a6b54f pbrook
    new_block->offset = last_ram_offset;
2534 94a6b54f pbrook
    new_block->length = size;
2535 94a6b54f pbrook
2536 94a6b54f pbrook
    new_block->next = ram_blocks;
2537 94a6b54f pbrook
    ram_blocks = new_block;
2538 94a6b54f pbrook
2539 94a6b54f pbrook
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2540 94a6b54f pbrook
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2541 94a6b54f pbrook
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2542 94a6b54f pbrook
           0xff, size >> TARGET_PAGE_BITS);
2543 94a6b54f pbrook
2544 94a6b54f pbrook
    last_ram_offset += size;
2545 94a6b54f pbrook
2546 6f0437e8 Jan Kiszka
    if (kvm_enabled())
2547 6f0437e8 Jan Kiszka
        kvm_setup_guest_memory(new_block->host, size);
2548 6f0437e8 Jan Kiszka
2549 94a6b54f pbrook
    return new_block->offset;
2550 94a6b54f pbrook
}
2551 e9a1ab19 bellard
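/* Illustrative sketch, not part of exec.c: the usual pattern for giving a
   machine model its RAM - allocate an offset, then map it.  The function
   name and the base address are hypothetical; the IO_MEM_RAM tagging
   follows common board code. */
static void example_install_ram(ram_addr_t ram_size)
{
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);

    cpu_register_physical_memory_offset(0x00000000, ram_size,
                                        ram_offset | IO_MEM_RAM, 0);
}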
2552 c227f099 Anthony Liguori
void qemu_ram_free(ram_addr_t addr)
2553 e9a1ab19 bellard
{
2554 94a6b54f pbrook
    /* TODO: implement this.  */
2555 e9a1ab19 bellard
}
2556 e9a1ab19 bellard
2557 dc828ca1 pbrook
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2558 5579c7f3 pbrook
   With the exception of the softmmu code in this file, this should
2559 5579c7f3 pbrook
   only be used for local memory (e.g. video ram) that the device owns,
2560 5579c7f3 pbrook
   and knows it isn't going to access beyond the end of the block.
2561 5579c7f3 pbrook

2562 5579c7f3 pbrook
   It should not be used for general purpose DMA.
2563 5579c7f3 pbrook
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2564 5579c7f3 pbrook
 */
2565 c227f099 Anthony Liguori
void *qemu_get_ram_ptr(ram_addr_t addr)
2566 dc828ca1 pbrook
{
2567 94a6b54f pbrook
    RAMBlock *prev;
2568 94a6b54f pbrook
    RAMBlock **prevp;
2569 94a6b54f pbrook
    RAMBlock *block;
2570 94a6b54f pbrook
2571 94a6b54f pbrook
    prev = NULL;
2572 94a6b54f pbrook
    prevp = &ram_blocks;
2573 94a6b54f pbrook
    block = ram_blocks;
2574 94a6b54f pbrook
    while (block && (block->offset > addr
2575 94a6b54f pbrook
                     || block->offset + block->length <= addr)) {
2576 94a6b54f pbrook
        if (prev)
2577 94a6b54f pbrook
          prevp = &prev->next;
2578 94a6b54f pbrook
        prev = block;
2579 94a6b54f pbrook
        block = block->next;
2580 94a6b54f pbrook
    }
2581 94a6b54f pbrook
    if (!block) {
2582 94a6b54f pbrook
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2583 94a6b54f pbrook
        abort();
2584 94a6b54f pbrook
    }
2585 94a6b54f pbrook
    /* Move this entry to the start of the list.  */
2586 94a6b54f pbrook
    if (prev) {
2587 94a6b54f pbrook
        prev->next = block->next;
2588 94a6b54f pbrook
        block->next = *prevp;
2589 94a6b54f pbrook
        *prevp = block;
2590 94a6b54f pbrook
    }
2591 94a6b54f pbrook
    return block->host + (addr - block->offset);
2592 dc828ca1 pbrook
}
2593 dc828ca1 pbrook
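/* Illustrative sketch, not part of exec.c: per the comment above, a device
   that owns a block of RAM (e.g. video memory) may keep a host pointer to
   it and access it directly.  'vram_offset' and 'vram_size' are
   hypothetical. */
static void example_clear_vram(ram_addr_t vram_offset, ram_addr_t vram_size)
{
    uint8_t *vram = qemu_get_ram_ptr(vram_offset);

    memset(vram, 0, vram_size);  /* direct host access, no TLB involved */
}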
2594 5579c7f3 pbrook
/* Some of the softmmu routines need to translate from a host pointer
2595 5579c7f3 pbrook
   (typically a TLB entry) back to a ram offset.  */
2596 c227f099 Anthony Liguori
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2597 5579c7f3 pbrook
{
2598 94a6b54f pbrook
    RAMBlock *prev;
2599 94a6b54f pbrook
    RAMBlock *block;
2600 94a6b54f pbrook
    uint8_t *host = ptr;
2601 94a6b54f pbrook
2602 94a6b54f pbrook
    prev = NULL;
2603 94a6b54f pbrook
    block = ram_blocks;
2604 94a6b54f pbrook
    while (block && (block->host > host
2605 94a6b54f pbrook
                     || block->host + block->length <= host)) {
2606 94a6b54f pbrook
        prev = block;
2607 94a6b54f pbrook
        block = block->next;
2608 94a6b54f pbrook
    }
2609 94a6b54f pbrook
    if (!block) {
2610 94a6b54f pbrook
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2611 94a6b54f pbrook
        abort();
2612 94a6b54f pbrook
    }
2613 94a6b54f pbrook
    return block->offset + (host - block->host);
2614 5579c7f3 pbrook
}
2615 5579c7f3 pbrook
2616 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2617 33417e70 bellard
{
2618 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2619 ab3d1727 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2620 67d3b957 pbrook
#endif
2621 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2622 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 1);
2623 e18231a3 blueswir1
#endif
2624 e18231a3 blueswir1
    return 0;
2625 e18231a3 blueswir1
}
2626 e18231a3 blueswir1
2627 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2628 e18231a3 blueswir1
{
2629 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2630 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2631 e18231a3 blueswir1
#endif
2632 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2633 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 2);
2634 e18231a3 blueswir1
#endif
2635 e18231a3 blueswir1
    return 0;
2636 e18231a3 blueswir1
}
2637 e18231a3 blueswir1
2638 c227f099 Anthony Liguori
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2639 e18231a3 blueswir1
{
2640 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2641 e18231a3 blueswir1
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2642 e18231a3 blueswir1
#endif
2643 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2644 e18231a3 blueswir1
    do_unassigned_access(addr, 0, 0, 0, 4);
2645 b4f0a316 blueswir1
#endif
2646 33417e70 bellard
    return 0;
2647 33417e70 bellard
}
2648 33417e70 bellard
2649 c227f099 Anthony Liguori
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2650 33417e70 bellard
{
2651 67d3b957 pbrook
#ifdef DEBUG_UNASSIGNED
2652 ab3d1727 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2653 67d3b957 pbrook
#endif
2654 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2655 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 1);
2656 e18231a3 blueswir1
#endif
2657 e18231a3 blueswir1
}
2658 e18231a3 blueswir1
2659 c227f099 Anthony Liguori
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2660 e18231a3 blueswir1
{
2661 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2662 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2663 e18231a3 blueswir1
#endif
2664 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2665 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 2);
2666 e18231a3 blueswir1
#endif
2667 e18231a3 blueswir1
}
2668 e18231a3 blueswir1
2669 c227f099 Anthony Liguori
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2670 e18231a3 blueswir1
{
2671 e18231a3 blueswir1
#ifdef DEBUG_UNASSIGNED
2672 e18231a3 blueswir1
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2673 e18231a3 blueswir1
#endif
2674 faed1c2a Edgar E. Iglesias
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2675 e18231a3 blueswir1
    do_unassigned_access(addr, 1, 0, 0, 4);
2676 b4f0a316 blueswir1
#endif
2677 33417e70 bellard
}
2678 33417e70 bellard
2679 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2680 33417e70 bellard
    unassigned_mem_readb,
2681 e18231a3 blueswir1
    unassigned_mem_readw,
2682 e18231a3 blueswir1
    unassigned_mem_readl,
2683 33417e70 bellard
};
2684 33417e70 bellard
2685 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2686 33417e70 bellard
    unassigned_mem_writeb,
2687 e18231a3 blueswir1
    unassigned_mem_writew,
2688 e18231a3 blueswir1
    unassigned_mem_writel,
2689 33417e70 bellard
};
2690 33417e70 bellard
2691 c227f099 Anthony Liguori
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2692 0f459d16 pbrook
                                uint32_t val)
2693 9fa3e853 bellard
{
2694 3a7d929e bellard
    int dirty_flags;
2695 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2696 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2697 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2698 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 1);
2699 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2700 9fa3e853 bellard
#endif
2701 3a7d929e bellard
    }
2702 5579c7f3 pbrook
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2703 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2704 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2705 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2706 f23db169 bellard
       flushed */
2707 f23db169 bellard
    if (dirty_flags == 0xff)
2708 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2709 9fa3e853 bellard
}
2710 9fa3e853 bellard
2711 c227f099 Anthony Liguori
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2712 0f459d16 pbrook
                                uint32_t val)
2713 9fa3e853 bellard
{
2714 3a7d929e bellard
    int dirty_flags;
2715 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2716 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2717 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2718 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 2);
2719 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2720 9fa3e853 bellard
#endif
2721 3a7d929e bellard
    }
2722 5579c7f3 pbrook
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2723 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2724 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2725 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2726 f23db169 bellard
       flushed */
2727 f23db169 bellard
    if (dirty_flags == 0xff)
2728 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2729 9fa3e853 bellard
}
2730 9fa3e853 bellard
2731 c227f099 Anthony Liguori
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2732 0f459d16 pbrook
                                uint32_t val)
2733 9fa3e853 bellard
{
2734 3a7d929e bellard
    int dirty_flags;
2735 3a7d929e bellard
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2736 3a7d929e bellard
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2737 9fa3e853 bellard
#if !defined(CONFIG_USER_ONLY)
2738 3a7d929e bellard
        tb_invalidate_phys_page_fast(ram_addr, 4);
2739 3a7d929e bellard
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2740 9fa3e853 bellard
#endif
2741 3a7d929e bellard
    }
2742 5579c7f3 pbrook
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2743 f23db169 bellard
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2744 f23db169 bellard
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2745 f23db169 bellard
    /* we remove the notdirty callback only if the code has been
2746 f23db169 bellard
       flushed */
2747 f23db169 bellard
    if (dirty_flags == 0xff)
2748 2e70f6ef pbrook
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2749 9fa3e853 bellard
}
2750 9fa3e853 bellard
2751 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const error_mem_read[3] = {
2752 9fa3e853 bellard
    NULL, /* never used */
2753 9fa3e853 bellard
    NULL, /* never used */
2754 9fa3e853 bellard
    NULL, /* never used */
2755 9fa3e853 bellard
};
2756 9fa3e853 bellard
2757 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2758 1ccde1cb bellard
    notdirty_mem_writeb,
2759 1ccde1cb bellard
    notdirty_mem_writew,
2760 1ccde1cb bellard
    notdirty_mem_writel,
2761 1ccde1cb bellard
};
2762 1ccde1cb bellard
2763 0f459d16 pbrook
/* Generate a debug exception if a watchpoint has been hit.  */
2764 b4051334 aliguori
static void check_watchpoint(int offset, int len_mask, int flags)
2765 0f459d16 pbrook
{
2766 0f459d16 pbrook
    CPUState *env = cpu_single_env;
2767 06d55cc1 aliguori
    target_ulong pc, cs_base;
2768 06d55cc1 aliguori
    TranslationBlock *tb;
2769 0f459d16 pbrook
    target_ulong vaddr;
2770 a1d1bb31 aliguori
    CPUWatchpoint *wp;
2771 06d55cc1 aliguori
    int cpu_flags;
2772 0f459d16 pbrook
2773 06d55cc1 aliguori
    if (env->watchpoint_hit) {
2774 06d55cc1 aliguori
        /* We re-entered the check after replacing the TB. Now raise
2775 06d55cc1 aliguori
         * the debug interrupt so that it will trigger after the
2776 06d55cc1 aliguori
         * current instruction. */
2777 06d55cc1 aliguori
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2778 06d55cc1 aliguori
        return;
2779 06d55cc1 aliguori
    }
2780 2e70f6ef pbrook
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2781 72cf2d4f Blue Swirl
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2782 b4051334 aliguori
        if ((vaddr == (wp->vaddr & len_mask) ||
2783 b4051334 aliguori
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2784 6e140f28 aliguori
            wp->flags |= BP_WATCHPOINT_HIT;
2785 6e140f28 aliguori
            if (!env->watchpoint_hit) {
2786 6e140f28 aliguori
                env->watchpoint_hit = wp;
2787 6e140f28 aliguori
                tb = tb_find_pc(env->mem_io_pc);
2788 6e140f28 aliguori
                if (!tb) {
2789 6e140f28 aliguori
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2790 6e140f28 aliguori
                              "pc=%p", (void *)env->mem_io_pc);
2791 6e140f28 aliguori
                }
2792 6e140f28 aliguori
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2793 6e140f28 aliguori
                tb_phys_invalidate(tb, -1);
2794 6e140f28 aliguori
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2795 6e140f28 aliguori
                    env->exception_index = EXCP_DEBUG;
2796 6e140f28 aliguori
                } else {
2797 6e140f28 aliguori
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2798 6e140f28 aliguori
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2799 6e140f28 aliguori
                }
2800 6e140f28 aliguori
                cpu_resume_from_signal(env, NULL);
2801 06d55cc1 aliguori
            }
2802 6e140f28 aliguori
        } else {
2803 6e140f28 aliguori
            wp->flags &= ~BP_WATCHPOINT_HIT;
2804 0f459d16 pbrook
        }
2805 0f459d16 pbrook
    }
2806 0f459d16 pbrook
}
2807 0f459d16 pbrook
2808 6658ffb8 pbrook
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2809 6658ffb8 pbrook
   so these check for a hit then pass through to the normal out-of-line
2810 6658ffb8 pbrook
   phys routines.  */
2811 c227f099 Anthony Liguori
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2812 6658ffb8 pbrook
{
2813 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2814 6658ffb8 pbrook
    return ldub_phys(addr);
2815 6658ffb8 pbrook
}
2816 6658ffb8 pbrook
2817 c227f099 Anthony Liguori
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2818 6658ffb8 pbrook
{
2819 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2820 6658ffb8 pbrook
    return lduw_phys(addr);
2821 6658ffb8 pbrook
}
2822 6658ffb8 pbrook
2823 c227f099 Anthony Liguori
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2824 6658ffb8 pbrook
{
2825 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2826 6658ffb8 pbrook
    return ldl_phys(addr);
2827 6658ffb8 pbrook
}
2828 6658ffb8 pbrook
2829 c227f099 Anthony Liguori
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2830 6658ffb8 pbrook
                             uint32_t val)
2831 6658ffb8 pbrook
{
2832 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2833 6658ffb8 pbrook
    stb_phys(addr, val);
2834 6658ffb8 pbrook
}
2835 6658ffb8 pbrook
2836 c227f099 Anthony Liguori
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2837 6658ffb8 pbrook
                             uint32_t val)
2838 6658ffb8 pbrook
{
2839 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2840 6658ffb8 pbrook
    stw_phys(addr, val);
2841 6658ffb8 pbrook
}
2842 6658ffb8 pbrook
2843 c227f099 Anthony Liguori
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2844 6658ffb8 pbrook
                             uint32_t val)
2845 6658ffb8 pbrook
{
2846 b4051334 aliguori
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2847 6658ffb8 pbrook
    stl_phys(addr, val);
2848 6658ffb8 pbrook
}
2849 6658ffb8 pbrook
2850 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2851 6658ffb8 pbrook
    watch_mem_readb,
2852 6658ffb8 pbrook
    watch_mem_readw,
2853 6658ffb8 pbrook
    watch_mem_readl,
2854 6658ffb8 pbrook
};
2855 6658ffb8 pbrook
2856 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2857 6658ffb8 pbrook
    watch_mem_writeb,
2858 6658ffb8 pbrook
    watch_mem_writew,
2859 6658ffb8 pbrook
    watch_mem_writel,
2860 6658ffb8 pbrook
};
2861 6658ffb8 pbrook
2862 c227f099 Anthony Liguori
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2863 db7b5426 blueswir1
                                 unsigned int len)
2864 db7b5426 blueswir1
{
2865 db7b5426 blueswir1
    uint32_t ret;
2866 db7b5426 blueswir1
    unsigned int idx;
2867 db7b5426 blueswir1
2868 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2869 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2870 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2871 db7b5426 blueswir1
           mmio, len, addr, idx);
2872 db7b5426 blueswir1
#endif
2873 8da3ff18 pbrook
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2874 8da3ff18 pbrook
                                       addr + mmio->region_offset[idx][0][len]);
2875 db7b5426 blueswir1
2876 db7b5426 blueswir1
    return ret;
2877 db7b5426 blueswir1
}
2878 db7b5426 blueswir1
2879 c227f099 Anthony Liguori
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2880 db7b5426 blueswir1
                              uint32_t value, unsigned int len)
2881 db7b5426 blueswir1
{
2882 db7b5426 blueswir1
    unsigned int idx;
2883 db7b5426 blueswir1
2884 8da3ff18 pbrook
    idx = SUBPAGE_IDX(addr);
2885 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2886 db7b5426 blueswir1
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2887 db7b5426 blueswir1
           mmio, len, addr, idx, value);
2888 db7b5426 blueswir1
#endif
2889 8da3ff18 pbrook
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2890 8da3ff18 pbrook
                                  addr + mmio->region_offset[idx][1][len],
2891 8da3ff18 pbrook
                                  value);
2892 db7b5426 blueswir1
}
2893 db7b5426 blueswir1
2894 c227f099 Anthony Liguori
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2895 db7b5426 blueswir1
{
2896 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2897 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2898 db7b5426 blueswir1
#endif
2899 db7b5426 blueswir1
2900 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 0);
2901 db7b5426 blueswir1
}
2902 db7b5426 blueswir1
2903 c227f099 Anthony Liguori
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2904 db7b5426 blueswir1
                            uint32_t value)
2905 db7b5426 blueswir1
{
2906 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2907 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2908 db7b5426 blueswir1
#endif
2909 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 0);
2910 db7b5426 blueswir1
}
2911 db7b5426 blueswir1
2912 c227f099 Anthony Liguori
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2913 db7b5426 blueswir1
{
2914 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2915 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2916 db7b5426 blueswir1
#endif
2917 db7b5426 blueswir1
2918 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 1);
2919 db7b5426 blueswir1
}
2920 db7b5426 blueswir1
2921 c227f099 Anthony Liguori
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2922 db7b5426 blueswir1
                            uint32_t value)
2923 db7b5426 blueswir1
{
2924 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2925 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2926 db7b5426 blueswir1
#endif
2927 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 1);
2928 db7b5426 blueswir1
}
2929 db7b5426 blueswir1
2930 c227f099 Anthony Liguori
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2931 db7b5426 blueswir1
{
2932 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2933 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2934 db7b5426 blueswir1
#endif
2935 db7b5426 blueswir1
2936 db7b5426 blueswir1
    return subpage_readlen(opaque, addr, 2);
2937 db7b5426 blueswir1
}
2938 db7b5426 blueswir1
2939 db7b5426 blueswir1
static void subpage_writel (void *opaque,
2940 c227f099 Anthony Liguori
                         target_phys_addr_t addr, uint32_t value)
2941 db7b5426 blueswir1
{
2942 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2943 db7b5426 blueswir1
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2944 db7b5426 blueswir1
#endif
2945 db7b5426 blueswir1
    subpage_writelen(opaque, addr, value, 2);
2946 db7b5426 blueswir1
}
2947 db7b5426 blueswir1
2948 d60efc6b Blue Swirl
static CPUReadMemoryFunc * const subpage_read[] = {
2949 db7b5426 blueswir1
    &subpage_readb,
2950 db7b5426 blueswir1
    &subpage_readw,
2951 db7b5426 blueswir1
    &subpage_readl,
2952 db7b5426 blueswir1
};
2953 db7b5426 blueswir1
2954 d60efc6b Blue Swirl
static CPUWriteMemoryFunc * const subpage_write[] = {
2955 db7b5426 blueswir1
    &subpage_writeb,
2956 db7b5426 blueswir1
    &subpage_writew,
2957 db7b5426 blueswir1
    &subpage_writel,
2958 db7b5426 blueswir1
};
2959 db7b5426 blueswir1
2960 c227f099 Anthony Liguori
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2961 c227f099 Anthony Liguori
                             ram_addr_t memory, ram_addr_t region_offset)
2962 db7b5426 blueswir1
{
2963 db7b5426 blueswir1
    int idx, eidx;
2964 4254fab8 blueswir1
    unsigned int i;
2965 db7b5426 blueswir1
2966 db7b5426 blueswir1
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2967 db7b5426 blueswir1
        return -1;
2968 db7b5426 blueswir1
    idx = SUBPAGE_IDX(start);
2969 db7b5426 blueswir1
    eidx = SUBPAGE_IDX(end);
2970 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
2971 0bf9e31a Blue Swirl
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2972 db7b5426 blueswir1
           mmio, start, end, idx, eidx, memory);
2973 db7b5426 blueswir1
#endif
2974 db7b5426 blueswir1
    memory >>= IO_MEM_SHIFT;
2975 db7b5426 blueswir1
    for (; idx <= eidx; idx++) {
2976 4254fab8 blueswir1
        for (i = 0; i < 4; i++) {
2977 3ee89922 blueswir1
            if (io_mem_read[memory][i]) {
2978 3ee89922 blueswir1
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2979 3ee89922 blueswir1
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2980 8da3ff18 pbrook
                mmio->region_offset[idx][0][i] = region_offset;
2981 3ee89922 blueswir1
            }
2982 3ee89922 blueswir1
            if (io_mem_write[memory][i]) {
2983 3ee89922 blueswir1
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2984 3ee89922 blueswir1
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2985 8da3ff18 pbrook
                mmio->region_offset[idx][1][i] = region_offset;
2986 3ee89922 blueswir1
            }
2987 4254fab8 blueswir1
        }
2988 db7b5426 blueswir1
    }
2989 db7b5426 blueswir1
2990 db7b5426 blueswir1
    return 0;
2991 db7b5426 blueswir1
}
2992 db7b5426 blueswir1
2993 c227f099 Anthony Liguori
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2994 c227f099 Anthony Liguori
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2995 db7b5426 blueswir1
{
2996 c227f099 Anthony Liguori
    subpage_t *mmio;
2997 db7b5426 blueswir1
    int subpage_memory;
2998 db7b5426 blueswir1
2999 c227f099 Anthony Liguori
    mmio = qemu_mallocz(sizeof(subpage_t));
3000 1eec614b aliguori
3001 1eec614b aliguori
    mmio->base = base;
3002 1eed09cb Avi Kivity
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3003 db7b5426 blueswir1
#if defined(DEBUG_SUBPAGE)
3004 1eec614b aliguori
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3005 1eec614b aliguori
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3006 db7b5426 blueswir1
#endif
3007 1eec614b aliguori
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3008 1eec614b aliguori
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3009 8da3ff18 pbrook
                         region_offset);
3010 db7b5426 blueswir1
3011 db7b5426 blueswir1
    return mmio;
3012 db7b5426 blueswir1
}
3013 db7b5426 blueswir1
3014 88715657 aliguori
static int get_free_io_mem_idx(void)
3015 88715657 aliguori
{
3016 88715657 aliguori
    int i;
3017 88715657 aliguori
3018 88715657 aliguori
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3019 88715657 aliguori
        if (!io_mem_used[i]) {
3020 88715657 aliguori
            io_mem_used[i] = 1;
3021 88715657 aliguori
            return i;
3022 88715657 aliguori
        }
3023 c6703b47 Riku Voipio
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3024 88715657 aliguori
    return -1;
3025 88715657 aliguori
}
3026 88715657 aliguori
3027 33417e70 bellard
/* mem_read and mem_write are arrays of functions containing the
3028 33417e70 bellard
   function to access byte (index 0), word (index 1) and dword (index
3029 0b4e6e3e Paul Brook
   2). Functions can be omitted with a NULL function pointer.
3030 3ee89922 blueswir1
   If io_index is non-zero, the corresponding io zone is
3031 4254fab8 blueswir1
   modified. If it is zero, a new io zone is allocated. The return
3032 4254fab8 blueswir1
   value can be used with cpu_register_physical_memory(). (-1) is
3033 4254fab8 blueswir1
   returned on error. */
3034 1eed09cb Avi Kivity
static int cpu_register_io_memory_fixed(int io_index,
3035 d60efc6b Blue Swirl
                                        CPUReadMemoryFunc * const *mem_read,
3036 d60efc6b Blue Swirl
                                        CPUWriteMemoryFunc * const *mem_write,
3037 1eed09cb Avi Kivity
                                        void *opaque)
3038 33417e70 bellard
{
3039 4254fab8 blueswir1
    int i, subwidth = 0;
3040 33417e70 bellard
3041 33417e70 bellard
    if (io_index <= 0) {
3042 88715657 aliguori
        io_index = get_free_io_mem_idx();
3043 88715657 aliguori
        if (io_index == -1)
3044 88715657 aliguori
            return io_index;
3045 33417e70 bellard
    } else {
3046 1eed09cb Avi Kivity
        io_index >>= IO_MEM_SHIFT;
3047 33417e70 bellard
        if (io_index >= IO_MEM_NB_ENTRIES)
3048 33417e70 bellard
            return -1;
3049 33417e70 bellard
    }
3050 b5ff1b31 bellard
3051 33417e70 bellard
    for(i = 0; i < 3; i++) {
3052 4254fab8 blueswir1
        if (!mem_read[i] || !mem_write[i])
3053 4254fab8 blueswir1
            subwidth = IO_MEM_SUBWIDTH;
3054 33417e70 bellard
        io_mem_read[io_index][i] = mem_read[i];
3055 33417e70 bellard
        io_mem_write[io_index][i] = mem_write[i];
3056 33417e70 bellard
    }
3057 a4193c8a bellard
    io_mem_opaque[io_index] = opaque;
3058 4254fab8 blueswir1
    return (io_index << IO_MEM_SHIFT) | subwidth;
3059 33417e70 bellard
}
3060 61382a50 bellard
3061 d60efc6b Blue Swirl
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3062 d60efc6b Blue Swirl
                           CPUWriteMemoryFunc * const *mem_write,
3063 1eed09cb Avi Kivity
                           void *opaque)
3064 1eed09cb Avi Kivity
{
3065 1eed09cb Avi Kivity
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3066 1eed09cb Avi Kivity
}
3067 1eed09cb Avi Kivity
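/* Illustrative sketch, not part of exec.c: registering MMIO callbacks for a
   hypothetical device.  Only 32-bit handlers are provided; the NULL entries
   make cpu_register_io_memory() return an index tagged with
   IO_MEM_SUBWIDTH, as handled above. */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;  /* read of the device register at offset 'addr' */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* write of the device register at offset 'addr' */
}

static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};

static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

/* Typical use in hypothetical device init code:
       int io_index = cpu_register_io_memory(example_dev_read,
                                             example_dev_write, opaque);
       cpu_register_physical_memory_offset(base, 0x1000, io_index, 0);
*/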
3068 88715657 aliguori
void cpu_unregister_io_memory(int io_table_address)
3069 88715657 aliguori
{
3070 88715657 aliguori
    int i;
3071 88715657 aliguori
    int io_index = io_table_address >> IO_MEM_SHIFT;
3072 88715657 aliguori
3073 88715657 aliguori
    for (i = 0; i < 3; i++) {
3074 88715657 aliguori
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3075 88715657 aliguori
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3076 88715657 aliguori
    }
3077 88715657 aliguori
    io_mem_opaque[io_index] = NULL;
3078 88715657 aliguori
    io_mem_used[io_index] = 0;
3079 88715657 aliguori
}
3080 88715657 aliguori
3081 e9179ce1 Avi Kivity
static void io_mem_init(void)
3082 e9179ce1 Avi Kivity
{
3083 e9179ce1 Avi Kivity
    int i;
3084 e9179ce1 Avi Kivity
3085 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3086 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3087 e9179ce1 Avi Kivity
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3088 e9179ce1 Avi Kivity
    for (i=0; i<5; i++)
3089 e9179ce1 Avi Kivity
        io_mem_used[i] = 1;
3090 e9179ce1 Avi Kivity
3091 e9179ce1 Avi Kivity
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3092 e9179ce1 Avi Kivity
                                          watch_mem_write, NULL);
3093 e9179ce1 Avi Kivity
}
3094 e9179ce1 Avi Kivity
3095 e2eef170 pbrook
#endif /* !defined(CONFIG_USER_ONLY) */
3096 e2eef170 pbrook
3097 13eb76e0 bellard
/* physical memory access (slow version, mainly for debug) */
3098 13eb76e0 bellard
#if defined(CONFIG_USER_ONLY)
3099 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3100 13eb76e0 bellard
                            int len, int is_write)
3101 13eb76e0 bellard
{
3102 13eb76e0 bellard
    int l, flags;
3103 13eb76e0 bellard
    target_ulong page;
3104 53a5960a pbrook
    void * p;
3105 13eb76e0 bellard
3106 13eb76e0 bellard
    while (len > 0) {
3107 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3108 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3109 13eb76e0 bellard
        if (l > len)
3110 13eb76e0 bellard
            l = len;
3111 13eb76e0 bellard
        flags = page_get_flags(page);
3112 13eb76e0 bellard
        if (!(flags & PAGE_VALID))
3113 13eb76e0 bellard
            return;
3114 13eb76e0 bellard
        if (is_write) {
3115 13eb76e0 bellard
            if (!(flags & PAGE_WRITE))
3116 13eb76e0 bellard
                return;
3117 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3118 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3119 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3120 579a97f7 bellard
                return;
3121 72fb7daa aurel32
            memcpy(p, buf, l);
3122 72fb7daa aurel32
            unlock_user(p, addr, l);
3123 13eb76e0 bellard
        } else {
3124 13eb76e0 bellard
            if (!(flags & PAGE_READ))
3125 13eb76e0 bellard
                return;
3126 579a97f7 bellard
            /* XXX: this code should not depend on lock_user */
3127 72fb7daa aurel32
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3128 579a97f7 bellard
                /* FIXME - should this return an error rather than just fail? */
3129 579a97f7 bellard
                return;
3130 72fb7daa aurel32
            memcpy(buf, p, l);
3131 5b257578 aurel32
            unlock_user(p, addr, 0);
3132 13eb76e0 bellard
        }
3133 13eb76e0 bellard
        len -= l;
3134 13eb76e0 bellard
        buf += l;
3135 13eb76e0 bellard
        addr += l;
3136 13eb76e0 bellard
    }
3137 13eb76e0 bellard
}
3138 8df1cd07 bellard
3139 13eb76e0 bellard
#else
3140 c227f099 Anthony Liguori
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3141 13eb76e0 bellard
                            int len, int is_write)
3142 13eb76e0 bellard
{
3143 13eb76e0 bellard
    int l, io_index;
3144 13eb76e0 bellard
    uint8_t *ptr;
3145 13eb76e0 bellard
    uint32_t val;
3146 c227f099 Anthony Liguori
    target_phys_addr_t page;
3147 2e12669a bellard
    unsigned long pd;
3148 92e873b9 bellard
    PhysPageDesc *p;
3149 3b46e624 ths
3150 13eb76e0 bellard
    while (len > 0) {
3151 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3152 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3153 13eb76e0 bellard
        if (l > len)
3154 13eb76e0 bellard
            l = len;
3155 92e873b9 bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3156 13eb76e0 bellard
        if (!p) {
3157 13eb76e0 bellard
            pd = IO_MEM_UNASSIGNED;
3158 13eb76e0 bellard
        } else {
3159 13eb76e0 bellard
            pd = p->phys_offset;
3160 13eb76e0 bellard
        }
3161 3b46e624 ths
3162 13eb76e0 bellard
        if (is_write) {
3163 3a7d929e bellard
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3164 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3165 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3166 8da3ff18 pbrook
                if (p)
3167 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3168 6a00d601 bellard
                /* XXX: could force cpu_single_env to NULL to avoid
3169 6a00d601 bellard
                   potential bugs */
3170 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3171 1c213d19 bellard
                    /* 32 bit write access */
3172 c27004ec bellard
                    val = ldl_p(buf);
3173 6c2934db aurel32
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3174 13eb76e0 bellard
                    l = 4;
3175 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3176 1c213d19 bellard
                    /* 16 bit write access */
3177 c27004ec bellard
                    val = lduw_p(buf);
3178 6c2934db aurel32
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3179 13eb76e0 bellard
                    l = 2;
3180 13eb76e0 bellard
                } else {
3181 1c213d19 bellard
                    /* 8 bit write access */
3182 c27004ec bellard
                    val = ldub_p(buf);
3183 6c2934db aurel32
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3184 13eb76e0 bellard
                    l = 1;
3185 13eb76e0 bellard
                }
3186 13eb76e0 bellard
            } else {
3187 b448f2f3 bellard
                unsigned long addr1;
3188 b448f2f3 bellard
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3189 13eb76e0 bellard
                /* RAM case */
3190 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(addr1);
3191 13eb76e0 bellard
                memcpy(ptr, buf, l);
3192 3a7d929e bellard
                if (!cpu_physical_memory_is_dirty(addr1)) {
3193 3a7d929e bellard
                    /* invalidate code */
3194 3a7d929e bellard
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3195 3a7d929e bellard
                    /* set dirty bit */
3196 5fafdf24 ths
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3197 f23db169 bellard
                        (0xff & ~CODE_DIRTY_FLAG);
3198 3a7d929e bellard
                }
3199 13eb76e0 bellard
            }
3200 13eb76e0 bellard
        } else {
3201 5fafdf24 ths
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3202 2a4188a3 bellard
                !(pd & IO_MEM_ROMD)) {
3203 c227f099 Anthony Liguori
                target_phys_addr_t addr1 = addr;
3204 13eb76e0 bellard
                /* I/O case */
3205 13eb76e0 bellard
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3206 8da3ff18 pbrook
                if (p)
3207 6c2934db aurel32
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3208 6c2934db aurel32
                if (l >= 4 && ((addr1 & 3) == 0)) {
3209 13eb76e0 bellard
                    /* 32 bit read access */
3210 6c2934db aurel32
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3211 c27004ec bellard
                    stl_p(buf, val);
3212 13eb76e0 bellard
                    l = 4;
3213 6c2934db aurel32
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3214 13eb76e0 bellard
                    /* 16 bit read access */
3215 6c2934db aurel32
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3216 c27004ec bellard
                    stw_p(buf, val);
3217 13eb76e0 bellard
                    l = 2;
3218 13eb76e0 bellard
                } else {
3219 1c213d19 bellard
                    /* 8 bit read access */
3220 6c2934db aurel32
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3221 c27004ec bellard
                    stb_p(buf, val);
3222 13eb76e0 bellard
                    l = 1;
3223 13eb76e0 bellard
                }
3224 13eb76e0 bellard
            } else {
3225 13eb76e0 bellard
                /* RAM case */
3226 5579c7f3 pbrook
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3227 13eb76e0 bellard
                    (addr & ~TARGET_PAGE_MASK);
3228 13eb76e0 bellard
                memcpy(buf, ptr, l);
3229 13eb76e0 bellard
            }
3230 13eb76e0 bellard
        }
3231 13eb76e0 bellard
        len -= l;
3232 13eb76e0 bellard
        buf += l;
3233 13eb76e0 bellard
        addr += l;
3234 13eb76e0 bellard
    }
3235 13eb76e0 bellard
}
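
/* Illustrative sketch (editor's addition, not part of exec.c): how a device
   model might use cpu_physical_memory_rw() to copy a descriptor out of guest
   RAM and write a status byte back.  The guest address, descriptor layout and
   status value below are hypothetical. */
static void example_fetch_descriptor(target_phys_addr_t desc_pa)
{
    uint8_t desc[16];
    uint8_t status = 0x01;

    /* is_write == 0: copy from guest physical memory into desc[] */
    cpu_physical_memory_rw(desc_pa, desc, sizeof(desc), 0);
    /* ... interpret the descriptor ... */
    /* is_write == 1: store a completion status back into guest memory */
    cpu_physical_memory_rw(desc_pa + 12, &status, 1, 1);
}
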
3236 8df1cd07 bellard
3237 d0ecd2aa bellard
/* used for ROM loading: can write to both RAM and ROM */
3238 c227f099 Anthony Liguori
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3239 d0ecd2aa bellard
                                   const uint8_t *buf, int len)
3240 d0ecd2aa bellard
{
3241 d0ecd2aa bellard
    int l;
3242 d0ecd2aa bellard
    uint8_t *ptr;
3243 c227f099 Anthony Liguori
    target_phys_addr_t page;
3244 d0ecd2aa bellard
    unsigned long pd;
3245 d0ecd2aa bellard
    PhysPageDesc *p;
3246 3b46e624 ths
3247 d0ecd2aa bellard
    while (len > 0) {
3248 d0ecd2aa bellard
        page = addr & TARGET_PAGE_MASK;
3249 d0ecd2aa bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3250 d0ecd2aa bellard
        if (l > len)
3251 d0ecd2aa bellard
            l = len;
3252 d0ecd2aa bellard
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3253 d0ecd2aa bellard
        if (!p) {
3254 d0ecd2aa bellard
            pd = IO_MEM_UNASSIGNED;
3255 d0ecd2aa bellard
        } else {
3256 d0ecd2aa bellard
            pd = p->phys_offset;
3257 d0ecd2aa bellard
        }
3258 3b46e624 ths
3259 d0ecd2aa bellard
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3260 2a4188a3 bellard
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3261 2a4188a3 bellard
            !(pd & IO_MEM_ROMD)) {
3262 d0ecd2aa bellard
            /* do nothing */
3263 d0ecd2aa bellard
        } else {
3264 d0ecd2aa bellard
            unsigned long addr1;
3265 d0ecd2aa bellard
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3266 d0ecd2aa bellard
            /* ROM/RAM case */
3267 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3268 d0ecd2aa bellard
            memcpy(ptr, buf, l);
3269 d0ecd2aa bellard
        }
3270 d0ecd2aa bellard
        len -= l;
3271 d0ecd2aa bellard
        buf += l;
3272 d0ecd2aa bellard
        addr += l;
3273 d0ecd2aa bellard
    }
3274 d0ecd2aa bellard
}
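
/* Illustrative sketch (editor's addition, not part of exec.c): a board-init
   routine could use cpu_physical_memory_write_rom() to copy a firmware image
   into a region registered as ROM, which cpu_physical_memory_rw() would
   silently refuse to write.  The blob, size and load address are hypothetical. */
static void example_load_firmware(const uint8_t *blob, int size,
                                  target_phys_addr_t load_addr)
{
    /* unlike cpu_physical_memory_write(), this also touches IO_MEM_ROM pages */
    cpu_physical_memory_write_rom(load_addr, blob, size);
}
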
3275 d0ecd2aa bellard
3276 6d16c2f8 aliguori
typedef struct {
3277 6d16c2f8 aliguori
    void *buffer;
3278 c227f099 Anthony Liguori
    target_phys_addr_t addr;
3279 c227f099 Anthony Liguori
    target_phys_addr_t len;
3280 6d16c2f8 aliguori
} BounceBuffer;
3281 6d16c2f8 aliguori
3282 6d16c2f8 aliguori
static BounceBuffer bounce;
3283 6d16c2f8 aliguori
3284 ba223c29 aliguori
typedef struct MapClient {
3285 ba223c29 aliguori
    void *opaque;
3286 ba223c29 aliguori
    void (*callback)(void *opaque);
3287 72cf2d4f Blue Swirl
    QLIST_ENTRY(MapClient) link;
3288 ba223c29 aliguori
} MapClient;
3289 ba223c29 aliguori
3290 72cf2d4f Blue Swirl
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3291 72cf2d4f Blue Swirl
    = QLIST_HEAD_INITIALIZER(map_client_list);
3292 ba223c29 aliguori
3293 ba223c29 aliguori
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3294 ba223c29 aliguori
{
3295 ba223c29 aliguori
    MapClient *client = qemu_malloc(sizeof(*client));
3296 ba223c29 aliguori
3297 ba223c29 aliguori
    client->opaque = opaque;
3298 ba223c29 aliguori
    client->callback = callback;
3299 72cf2d4f Blue Swirl
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3300 ba223c29 aliguori
    return client;
3301 ba223c29 aliguori
}
3302 ba223c29 aliguori
3303 ba223c29 aliguori
void cpu_unregister_map_client(void *_client)
3304 ba223c29 aliguori
{
3305 ba223c29 aliguori
    MapClient *client = (MapClient *)_client;
3306 ba223c29 aliguori
3307 72cf2d4f Blue Swirl
    QLIST_REMOVE(client, link);
3308 34d5e948 Isaku Yamahata
    qemu_free(client);
3309 ba223c29 aliguori
}
3310 ba223c29 aliguori
3311 ba223c29 aliguori
static void cpu_notify_map_clients(void)
3312 ba223c29 aliguori
{
3313 ba223c29 aliguori
    MapClient *client;
3314 ba223c29 aliguori
3315 72cf2d4f Blue Swirl
    while (!QLIST_EMPTY(&map_client_list)) {
3316 72cf2d4f Blue Swirl
        client = QLIST_FIRST(&map_client_list);
3317 ba223c29 aliguori
        client->callback(client->opaque);
3318 34d5e948 Isaku Yamahata
        cpu_unregister_map_client(client);
3319 ba223c29 aliguori
    }
3320 ba223c29 aliguori
}
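
/* Illustrative sketch (editor's addition, not part of exec.c): intended use of
   the MapClient list.  When cpu_physical_memory_map() returns NULL because the
   single bounce buffer is busy, the caller registers a callback and retries the
   transfer from it; cpu_notify_map_clients() fires the callback (and then
   unregisters it) once cpu_physical_memory_unmap() releases the buffer.
   example_retry_transfer() and its opaque argument are hypothetical. */
static void example_retry_transfer(void *opaque)
{
    /* The client entry is removed from map_client_list by
       cpu_notify_map_clients() itself; just retry cpu_physical_memory_map()
       here and resume the pending transfer. */
}

static void example_start_transfer(void *dev)
{
    /* ... cpu_physical_memory_map() just returned NULL ... */
    /* the returned handle is only needed for an explicit early unregister */
    cpu_register_map_client(dev, example_retry_transfer);
}
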
3321 ba223c29 aliguori
3322 6d16c2f8 aliguori
/* Map a physical memory region into a host virtual address.
3323 6d16c2f8 aliguori
 * May map a subset of the requested range, given by and returned in *plen.
3324 6d16c2f8 aliguori
 * May return NULL if resources needed to perform the mapping are exhausted.
3325 6d16c2f8 aliguori
 * Use only for reads OR writes - not for read-modify-write operations.
3326 ba223c29 aliguori
 * Use cpu_register_map_client() to know when retrying the map operation is
3327 ba223c29 aliguori
 * likely to succeed.
3328 6d16c2f8 aliguori
 */
3329 c227f099 Anthony Liguori
void *cpu_physical_memory_map(target_phys_addr_t addr,
3330 c227f099 Anthony Liguori
                              target_phys_addr_t *plen,
3331 6d16c2f8 aliguori
                              int is_write)
3332 6d16c2f8 aliguori
{
3333 c227f099 Anthony Liguori
    target_phys_addr_t len = *plen;
3334 c227f099 Anthony Liguori
    target_phys_addr_t done = 0;
3335 6d16c2f8 aliguori
    int l;
3336 6d16c2f8 aliguori
    uint8_t *ret = NULL;
3337 6d16c2f8 aliguori
    uint8_t *ptr;
3338 c227f099 Anthony Liguori
    target_phys_addr_t page;
3339 6d16c2f8 aliguori
    unsigned long pd;
3340 6d16c2f8 aliguori
    PhysPageDesc *p;
3341 6d16c2f8 aliguori
    unsigned long addr1;
3342 6d16c2f8 aliguori
3343 6d16c2f8 aliguori
    while (len > 0) {
3344 6d16c2f8 aliguori
        page = addr & TARGET_PAGE_MASK;
3345 6d16c2f8 aliguori
        l = (page + TARGET_PAGE_SIZE) - addr;
3346 6d16c2f8 aliguori
        if (l > len)
3347 6d16c2f8 aliguori
            l = len;
3348 6d16c2f8 aliguori
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3349 6d16c2f8 aliguori
        if (!p) {
3350 6d16c2f8 aliguori
            pd = IO_MEM_UNASSIGNED;
3351 6d16c2f8 aliguori
        } else {
3352 6d16c2f8 aliguori
            pd = p->phys_offset;
3353 6d16c2f8 aliguori
        }
3354 6d16c2f8 aliguori
3355 6d16c2f8 aliguori
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3356 6d16c2f8 aliguori
            if (done || bounce.buffer) {
3357 6d16c2f8 aliguori
                break;
3358 6d16c2f8 aliguori
            }
3359 6d16c2f8 aliguori
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3360 6d16c2f8 aliguori
            bounce.addr = addr;
3361 6d16c2f8 aliguori
            bounce.len = l;
3362 6d16c2f8 aliguori
            if (!is_write) {
3363 6d16c2f8 aliguori
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3364 6d16c2f8 aliguori
            }
3365 6d16c2f8 aliguori
            ptr = bounce.buffer;
3366 6d16c2f8 aliguori
        } else {
3367 6d16c2f8 aliguori
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3368 5579c7f3 pbrook
            ptr = qemu_get_ram_ptr(addr1);
3369 6d16c2f8 aliguori
        }
3370 6d16c2f8 aliguori
        if (!done) {
3371 6d16c2f8 aliguori
            ret = ptr;
3372 6d16c2f8 aliguori
        } else if (ret + done != ptr) {
3373 6d16c2f8 aliguori
            break;
3374 6d16c2f8 aliguori
        }
3375 6d16c2f8 aliguori
3376 6d16c2f8 aliguori
        len -= l;
3377 6d16c2f8 aliguori
        addr += l;
3378 6d16c2f8 aliguori
        done += l;
3379 6d16c2f8 aliguori
    }
3380 6d16c2f8 aliguori
    *plen = done;
3381 6d16c2f8 aliguori
    return ret;
3382 6d16c2f8 aliguori
}
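
/* Illustrative sketch (editor's addition, not part of exec.c): a complete
   map/use/unmap cycle for a read-only transfer.  The function may map less
   than was asked for (host contiguity ends, or the request hits MMIO and
   falls back to the one-page bounce buffer), so callers loop on the length
   returned in *plen; is_write is chosen once per direction because the
   mapping must not be used for read-modify-write.  The consume() callback
   is hypothetical. */
static void example_copy_from_guest(target_phys_addr_t addr,
                                    target_phys_addr_t len,
                                    void (*consume)(const void *p,
                                                    target_phys_addr_t n))
{
    while (len > 0) {
        target_phys_addr_t plen = len;
        void *ptr = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);

        if (!ptr) {
            /* resources exhausted: see cpu_register_map_client() above */
            break;
        }
        consume(ptr, plen);
        /* access_len == plen: everything handed out was actually read */
        cpu_physical_memory_unmap(ptr, plen, 0, plen);
        addr += plen;
        len -= plen;
    }
}
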
3383 6d16c2f8 aliguori
3384 6d16c2f8 aliguori
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3385 6d16c2f8 aliguori
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3386 6d16c2f8 aliguori
 * the amount of memory that was actually read or written by the caller.
3387 6d16c2f8 aliguori
 */
3388 c227f099 Anthony Liguori
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3389 c227f099 Anthony Liguori
                               int is_write, target_phys_addr_t access_len)
3390 6d16c2f8 aliguori
{
3391 6d16c2f8 aliguori
    if (buffer != bounce.buffer) {
3392 6d16c2f8 aliguori
        if (is_write) {
3393 c227f099 Anthony Liguori
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3394 6d16c2f8 aliguori
            while (access_len) {
3395 6d16c2f8 aliguori
                unsigned l;
3396 6d16c2f8 aliguori
                l = TARGET_PAGE_SIZE;
3397 6d16c2f8 aliguori
                if (l > access_len)
3398 6d16c2f8 aliguori
                    l = access_len;
3399 6d16c2f8 aliguori
                if (!cpu_physical_memory_is_dirty(addr1)) {
3400 6d16c2f8 aliguori
                    /* invalidate code */
3401 6d16c2f8 aliguori
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3402 6d16c2f8 aliguori
                    /* set dirty bit */
3403 6d16c2f8 aliguori
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3404 6d16c2f8 aliguori
                        (0xff & ~CODE_DIRTY_FLAG);
3405 6d16c2f8 aliguori
                }
3406 6d16c2f8 aliguori
                addr1 += l;
3407 6d16c2f8 aliguori
                access_len -= l;
3408 6d16c2f8 aliguori
            }
3409 6d16c2f8 aliguori
        }
3410 6d16c2f8 aliguori
        return;
3411 6d16c2f8 aliguori
    }
3412 6d16c2f8 aliguori
    if (is_write) {
3413 6d16c2f8 aliguori
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3414 6d16c2f8 aliguori
    }
3415 f8a83245 Herve Poussineau
    qemu_vfree(bounce.buffer);
3416 6d16c2f8 aliguori
    bounce.buffer = NULL;
3417 ba223c29 aliguori
    cpu_notify_map_clients();
3418 6d16c2f8 aliguori
}
3419 d0ecd2aa bellard
3420 8df1cd07 bellard
/* warning: addr must be aligned */
3421 c227f099 Anthony Liguori
uint32_t ldl_phys(target_phys_addr_t addr)
3422 8df1cd07 bellard
{
3423 8df1cd07 bellard
    int io_index;
3424 8df1cd07 bellard
    uint8_t *ptr;
3425 8df1cd07 bellard
    uint32_t val;
3426 8df1cd07 bellard
    unsigned long pd;
3427 8df1cd07 bellard
    PhysPageDesc *p;
3428 8df1cd07 bellard
3429 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3430 8df1cd07 bellard
    if (!p) {
3431 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3432 8df1cd07 bellard
    } else {
3433 8df1cd07 bellard
        pd = p->phys_offset;
3434 8df1cd07 bellard
    }
3435 3b46e624 ths
3436 5fafdf24 ths
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3437 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3438 8df1cd07 bellard
        /* I/O case */
3439 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3440 8da3ff18 pbrook
        if (p)
3441 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3442 8df1cd07 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3443 8df1cd07 bellard
    } else {
3444 8df1cd07 bellard
        /* RAM case */
3445 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3446 8df1cd07 bellard
            (addr & ~TARGET_PAGE_MASK);
3447 8df1cd07 bellard
        val = ldl_p(ptr);
3448 8df1cd07 bellard
    }
3449 8df1cd07 bellard
    return val;
3450 8df1cd07 bellard
}
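
/* Illustrative sketch (editor's addition, not part of exec.c): ldl_phys() and
   the other fixed-size helpers below expect naturally aligned physical
   addresses; data that may be unaligned or of variable length should go
   through cpu_physical_memory_read()/write() instead.  The register base and
   index below are hypothetical. */
static uint32_t example_read_guest_word(target_phys_addr_t base, int regno)
{
    /* base is assumed 4-byte aligned, so base + 4 * regno stays aligned */
    return ldl_phys(base + 4 * regno);
}
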
3451 8df1cd07 bellard
3452 84b7b8e7 bellard
/* warning: addr must be aligned */
3453 c227f099 Anthony Liguori
uint64_t ldq_phys(target_phys_addr_t addr)
3454 84b7b8e7 bellard
{
3455 84b7b8e7 bellard
    int io_index;
3456 84b7b8e7 bellard
    uint8_t *ptr;
3457 84b7b8e7 bellard
    uint64_t val;
3458 84b7b8e7 bellard
    unsigned long pd;
3459 84b7b8e7 bellard
    PhysPageDesc *p;
3460 84b7b8e7 bellard
3461 84b7b8e7 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3462 84b7b8e7 bellard
    if (!p) {
3463 84b7b8e7 bellard
        pd = IO_MEM_UNASSIGNED;
3464 84b7b8e7 bellard
    } else {
3465 84b7b8e7 bellard
        pd = p->phys_offset;
3466 84b7b8e7 bellard
    }
3467 3b46e624 ths
3468 2a4188a3 bellard
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3469 2a4188a3 bellard
        !(pd & IO_MEM_ROMD)) {
3470 84b7b8e7 bellard
        /* I/O case */
3471 84b7b8e7 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3472 8da3ff18 pbrook
        if (p)
3473 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3474 84b7b8e7 bellard
#ifdef TARGET_WORDS_BIGENDIAN
3475 84b7b8e7 bellard
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3476 84b7b8e7 bellard
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3477 84b7b8e7 bellard
#else
3478 84b7b8e7 bellard
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3479 84b7b8e7 bellard
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3480 84b7b8e7 bellard
#endif
3481 84b7b8e7 bellard
    } else {
3482 84b7b8e7 bellard
        /* RAM case */
3483 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3484 84b7b8e7 bellard
            (addr & ~TARGET_PAGE_MASK);
3485 84b7b8e7 bellard
        val = ldq_p(ptr);
3486 84b7b8e7 bellard
    }
3487 84b7b8e7 bellard
    return val;
3488 84b7b8e7 bellard
}
3489 84b7b8e7 bellard
3490 aab33094 bellard
/* XXX: optimize */
3491 c227f099 Anthony Liguori
uint32_t ldub_phys(target_phys_addr_t addr)
3492 aab33094 bellard
{
3493 aab33094 bellard
    uint8_t val;
3494 aab33094 bellard
    cpu_physical_memory_read(addr, &val, 1);
3495 aab33094 bellard
    return val;
3496 aab33094 bellard
}
3497 aab33094 bellard
3498 aab33094 bellard
/* XXX: optimize */
3499 c227f099 Anthony Liguori
uint32_t lduw_phys(target_phys_addr_t addr)
3500 aab33094 bellard
{
3501 aab33094 bellard
    uint16_t val;
3502 aab33094 bellard
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3503 aab33094 bellard
    return tswap16(val);
3504 aab33094 bellard
}
3505 aab33094 bellard
3506 8df1cd07 bellard
/* warning: addr must be aligned. The RAM page is not marked as dirty
3507 8df1cd07 bellard
   and the code inside is not invalidated. It is useful if the dirty
3508 8df1cd07 bellard
   bits are used to track modified PTEs */
3509 c227f099 Anthony Liguori
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3510 8df1cd07 bellard
{
3511 8df1cd07 bellard
    int io_index;
3512 8df1cd07 bellard
    uint8_t *ptr;
3513 8df1cd07 bellard
    unsigned long pd;
3514 8df1cd07 bellard
    PhysPageDesc *p;
3515 8df1cd07 bellard
3516 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3517 8df1cd07 bellard
    if (!p) {
3518 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3519 8df1cd07 bellard
    } else {
3520 8df1cd07 bellard
        pd = p->phys_offset;
3521 8df1cd07 bellard
    }
3522 3b46e624 ths
3523 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3524 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3525 8da3ff18 pbrook
        if (p)
3526 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3527 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3528 8df1cd07 bellard
    } else {
3529 74576198 aliguori
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3530 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3531 8df1cd07 bellard
        stl_p(ptr, val);
3532 74576198 aliguori
3533 74576198 aliguori
        if (unlikely(in_migration)) {
3534 74576198 aliguori
            if (!cpu_physical_memory_is_dirty(addr1)) {
3535 74576198 aliguori
                /* invalidate code */
3536 74576198 aliguori
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3537 74576198 aliguori
                /* set dirty bit */
3538 74576198 aliguori
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3539 74576198 aliguori
                    (0xff & ~CODE_DIRTY_FLAG);
3540 74576198 aliguori
            }
3541 74576198 aliguori
        }
3542 8df1cd07 bellard
    }
3543 8df1cd07 bellard
}
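
/* Illustrative sketch (editor's addition, not part of exec.c): the typical
   caller of stl_phys_notdirty() is a softmmu page-table walker that sets
   accessed/dirty bits in a guest PTE.  Using the _notdirty variant keeps the
   emulator's own bookkeeping writes from disturbing the dirty bitmap that is
   being used to detect guest modifications of the page tables.  The PTE
   layout and flag value below are hypothetical. */
static void example_update_pte_flags(target_phys_addr_t pte_addr,
                                     uint32_t accessed_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        pte |= accessed_bit;
        /* does not mark the page dirty and does not invalidate translated code */
        stl_phys_notdirty(pte_addr, pte);
    }
}
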
3544 8df1cd07 bellard
3545 c227f099 Anthony Liguori
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3546 bc98a7ef j_mayer
{
3547 bc98a7ef j_mayer
    int io_index;
3548 bc98a7ef j_mayer
    uint8_t *ptr;
3549 bc98a7ef j_mayer
    unsigned long pd;
3550 bc98a7ef j_mayer
    PhysPageDesc *p;
3551 bc98a7ef j_mayer
3552 bc98a7ef j_mayer
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3553 bc98a7ef j_mayer
    if (!p) {
3554 bc98a7ef j_mayer
        pd = IO_MEM_UNASSIGNED;
3555 bc98a7ef j_mayer
    } else {
3556 bc98a7ef j_mayer
        pd = p->phys_offset;
3557 bc98a7ef j_mayer
    }
3558 3b46e624 ths
3559 bc98a7ef j_mayer
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3560 bc98a7ef j_mayer
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3561 8da3ff18 pbrook
        if (p)
3562 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3563 bc98a7ef j_mayer
#ifdef TARGET_WORDS_BIGENDIAN
3564 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3565 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3566 bc98a7ef j_mayer
#else
3567 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3568 bc98a7ef j_mayer
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3569 bc98a7ef j_mayer
#endif
3570 bc98a7ef j_mayer
    } else {
3571 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3572 bc98a7ef j_mayer
            (addr & ~TARGET_PAGE_MASK);
3573 bc98a7ef j_mayer
        stq_p(ptr, val);
3574 bc98a7ef j_mayer
    }
3575 bc98a7ef j_mayer
}
3576 bc98a7ef j_mayer
3577 8df1cd07 bellard
/* warning: addr must be aligned */
3578 c227f099 Anthony Liguori
void stl_phys(target_phys_addr_t addr, uint32_t val)
3579 8df1cd07 bellard
{
3580 8df1cd07 bellard
    int io_index;
3581 8df1cd07 bellard
    uint8_t *ptr;
3582 8df1cd07 bellard
    unsigned long pd;
3583 8df1cd07 bellard
    PhysPageDesc *p;
3584 8df1cd07 bellard
3585 8df1cd07 bellard
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3586 8df1cd07 bellard
    if (!p) {
3587 8df1cd07 bellard
        pd = IO_MEM_UNASSIGNED;
3588 8df1cd07 bellard
    } else {
3589 8df1cd07 bellard
        pd = p->phys_offset;
3590 8df1cd07 bellard
    }
3591 3b46e624 ths
3592 3a7d929e bellard
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3593 8df1cd07 bellard
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3594 8da3ff18 pbrook
        if (p)
3595 8da3ff18 pbrook
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3596 8df1cd07 bellard
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3597 8df1cd07 bellard
    } else {
3598 8df1cd07 bellard
        unsigned long addr1;
3599 8df1cd07 bellard
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3600 8df1cd07 bellard
        /* RAM case */
3601 5579c7f3 pbrook
        ptr = qemu_get_ram_ptr(addr1);
3602 8df1cd07 bellard
        stl_p(ptr, val);
3603 3a7d929e bellard
        if (!cpu_physical_memory_is_dirty(addr1)) {
3604 3a7d929e bellard
            /* invalidate code */
3605 3a7d929e bellard
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3606 3a7d929e bellard
            /* set dirty bit */
3607 f23db169 bellard
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3608 f23db169 bellard
                (0xff & ~CODE_DIRTY_FLAG);
3609 3a7d929e bellard
        }
3610 8df1cd07 bellard
    }
3611 8df1cd07 bellard
}
3612 8df1cd07 bellard
3613 aab33094 bellard
/* XXX: optimize */
3614 c227f099 Anthony Liguori
void stb_phys(target_phys_addr_t addr, uint32_t val)
3615 aab33094 bellard
{
3616 aab33094 bellard
    uint8_t v = val;
3617 aab33094 bellard
    cpu_physical_memory_write(addr, &v, 1);
3618 aab33094 bellard
}
3619 aab33094 bellard
3620 aab33094 bellard
/* XXX: optimize */
3621 c227f099 Anthony Liguori
void stw_phys(target_phys_addr_t addr, uint32_t val)
3622 aab33094 bellard
{
3623 aab33094 bellard
    uint16_t v = tswap16(val);
3624 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3625 aab33094 bellard
}
3626 aab33094 bellard
3627 aab33094 bellard
/* XXX: optimize */
3628 c227f099 Anthony Liguori
void stq_phys(target_phys_addr_t addr, uint64_t val)
3629 aab33094 bellard
{
3630 aab33094 bellard
    val = tswap64(val);
3631 aab33094 bellard
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3632 aab33094 bellard
}
3633 aab33094 bellard
3634 13eb76e0 bellard
#endif
3635 13eb76e0 bellard
3636 5e2972fd aliguori
/* virtual memory access for debug (includes writing to ROM) */
3637 5fafdf24 ths
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3638 b448f2f3 bellard
                        uint8_t *buf, int len, int is_write)
3639 13eb76e0 bellard
{
3640 13eb76e0 bellard
    int l;
3641 c227f099 Anthony Liguori
    target_phys_addr_t phys_addr;
3642 9b3c35e0 j_mayer
    target_ulong page;
3643 13eb76e0 bellard
3644 13eb76e0 bellard
    while (len > 0) {
3645 13eb76e0 bellard
        page = addr & TARGET_PAGE_MASK;
3646 13eb76e0 bellard
        phys_addr = cpu_get_phys_page_debug(env, page);
3647 13eb76e0 bellard
        /* if no physical page mapped, return an error */
3648 13eb76e0 bellard
        if (phys_addr == -1)
3649 13eb76e0 bellard
            return -1;
3650 13eb76e0 bellard
        l = (page + TARGET_PAGE_SIZE) - addr;
3651 13eb76e0 bellard
        if (l > len)
3652 13eb76e0 bellard
            l = len;
3653 5e2972fd aliguori
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3654 5e2972fd aliguori
#if !defined(CONFIG_USER_ONLY)
3655 5e2972fd aliguori
        if (is_write)
3656 5e2972fd aliguori
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3657 5e2972fd aliguori
        else
3658 5e2972fd aliguori
#endif
3659 5e2972fd aliguori
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3660 13eb76e0 bellard
        len -= l;
3661 13eb76e0 bellard
        buf += l;
3662 13eb76e0 bellard
        addr += l;
3663 13eb76e0 bellard
    }
3664 13eb76e0 bellard
    return 0;
3665 13eb76e0 bellard
}
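
/* Illustrative sketch (editor's addition, not part of exec.c): debugger-style
   access through the guest virtual address space, as e.g. a gdb stub would
   perform it.  Unlike the softmmu load/store helpers this never faults into
   the guest; it simply reports failure if no physical page is mapped.  The
   helper name is hypothetical. */
static int example_peek_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;              /* no physical page mapped at vaddr */
    }
    *out = ldl_p(buf);          /* buffer holds target byte order */
    return 0;
}
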
3666 13eb76e0 bellard
3667 2e70f6ef pbrook
/* in deterministic execution mode, instructions doing device I/Os
3668 2e70f6ef pbrook
   must be at the end of the TB */
3669 2e70f6ef pbrook
void cpu_io_recompile(CPUState *env, void *retaddr)
3670 2e70f6ef pbrook
{
3671 2e70f6ef pbrook
    TranslationBlock *tb;
3672 2e70f6ef pbrook
    uint32_t n, cflags;
3673 2e70f6ef pbrook
    target_ulong pc, cs_base;
3674 2e70f6ef pbrook
    uint64_t flags;
3675 2e70f6ef pbrook
3676 2e70f6ef pbrook
    tb = tb_find_pc((unsigned long)retaddr);
3677 2e70f6ef pbrook
    if (!tb) {
3678 2e70f6ef pbrook
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
3679 2e70f6ef pbrook
                  retaddr);
3680 2e70f6ef pbrook
    }
3681 2e70f6ef pbrook
    n = env->icount_decr.u16.low + tb->icount;
3682 2e70f6ef pbrook
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3683 2e70f6ef pbrook
    /* Calculate how many instructions had been executed before the fault
3684 bf20dc07 ths
       occurred.  */
3685 2e70f6ef pbrook
    n = n - env->icount_decr.u16.low;
3686 2e70f6ef pbrook
    /* Generate a new TB ending on the I/O insn.  */
3687 2e70f6ef pbrook
    n++;
3688 2e70f6ef pbrook
    /* On MIPS and SH, delay slot instructions can only be restarted if
3689 2e70f6ef pbrook
       they were already the first instruction in the TB.  If this is not
3690 bf20dc07 ths
       the first instruction in a TB then re-execute the preceding
3691 2e70f6ef pbrook
       branch.  */
3692 2e70f6ef pbrook
#if defined(TARGET_MIPS)
3693 2e70f6ef pbrook
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3694 2e70f6ef pbrook
        env->active_tc.PC -= 4;
3695 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3696 2e70f6ef pbrook
        env->hflags &= ~MIPS_HFLAG_BMASK;
3697 2e70f6ef pbrook
    }
3698 2e70f6ef pbrook
#elif defined(TARGET_SH4)
3699 2e70f6ef pbrook
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3700 2e70f6ef pbrook
            && n > 1) {
3701 2e70f6ef pbrook
        env->pc -= 2;
3702 2e70f6ef pbrook
        env->icount_decr.u16.low++;
3703 2e70f6ef pbrook
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3704 2e70f6ef pbrook
    }
3705 2e70f6ef pbrook
#endif
3706 2e70f6ef pbrook
    /* This should never happen.  */
3707 2e70f6ef pbrook
    if (n > CF_COUNT_MASK)
3708 2e70f6ef pbrook
        cpu_abort(env, "TB too big during recompile");
3709 2e70f6ef pbrook
3710 2e70f6ef pbrook
    cflags = n | CF_LAST_IO;
3711 2e70f6ef pbrook
    pc = tb->pc;
3712 2e70f6ef pbrook
    cs_base = tb->cs_base;
3713 2e70f6ef pbrook
    flags = tb->flags;
3714 2e70f6ef pbrook
    tb_phys_invalidate(tb, -1);
3715 2e70f6ef pbrook
    /* FIXME: In theory this could raise an exception.  In practice
3716 2e70f6ef pbrook
       we have already translated the block once so it's probably ok.  */
3717 2e70f6ef pbrook
    tb_gen_code(env, pc, cs_base, flags, cflags);
3718 bf20dc07 ths
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3719 2e70f6ef pbrook
       the first in the TB) then we end up generating a whole new TB and
3720 2e70f6ef pbrook
       repeating the fault, which is horribly inefficient.
3721 2e70f6ef pbrook
       Better would be to execute just this insn uncached, or generate a
3722 2e70f6ef pbrook
       second new TB.  */
3723 2e70f6ef pbrook
    cpu_resume_from_signal(env, NULL);
3724 2e70f6ef pbrook
}
3725 2e70f6ef pbrook
3726 e3db7226 bellard
void dump_exec_info(FILE *f,
3727 e3db7226 bellard
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3728 e3db7226 bellard
{
3729 e3db7226 bellard
    int i, target_code_size, max_target_code_size;
3730 e3db7226 bellard
    int direct_jmp_count, direct_jmp2_count, cross_page;
3731 e3db7226 bellard
    TranslationBlock *tb;
3732 3b46e624 ths
3733 e3db7226 bellard
    target_code_size = 0;
3734 e3db7226 bellard
    max_target_code_size = 0;
3735 e3db7226 bellard
    cross_page = 0;
3736 e3db7226 bellard
    direct_jmp_count = 0;
3737 e3db7226 bellard
    direct_jmp2_count = 0;
3738 e3db7226 bellard
    for(i = 0; i < nb_tbs; i++) {
3739 e3db7226 bellard
        tb = &tbs[i];
3740 e3db7226 bellard
        target_code_size += tb->size;
3741 e3db7226 bellard
        if (tb->size > max_target_code_size)
3742 e3db7226 bellard
            max_target_code_size = tb->size;
3743 e3db7226 bellard
        if (tb->page_addr[1] != -1)
3744 e3db7226 bellard
            cross_page++;
3745 e3db7226 bellard
        if (tb->tb_next_offset[0] != 0xffff) {
3746 e3db7226 bellard
            direct_jmp_count++;
3747 e3db7226 bellard
            if (tb->tb_next_offset[1] != 0xffff) {
3748 e3db7226 bellard
                direct_jmp2_count++;
3749 e3db7226 bellard
            }
3750 e3db7226 bellard
        }
3751 e3db7226 bellard
    }
3752 e3db7226 bellard
    /* XXX: avoid using doubles? */
3753 57fec1fe bellard
    cpu_fprintf(f, "Translation buffer state:\n");
3754 26a5f13b bellard
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
3755 26a5f13b bellard
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3756 26a5f13b bellard
    cpu_fprintf(f, "TB count            %d/%d\n", 
3757 26a5f13b bellard
                nb_tbs, code_gen_max_blocks);
3758 5fafdf24 ths
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3759 e3db7226 bellard
                nb_tbs ? target_code_size / nb_tbs : 0,
3760 e3db7226 bellard
                max_target_code_size);
3761 5fafdf24 ths
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3762 e3db7226 bellard
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3763 e3db7226 bellard
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3764 5fafdf24 ths
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3765 5fafdf24 ths
            cross_page,
3766 e3db7226 bellard
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3767 e3db7226 bellard
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3768 5fafdf24 ths
                direct_jmp_count,
3769 e3db7226 bellard
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3770 e3db7226 bellard
                direct_jmp2_count,
3771 e3db7226 bellard
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3772 57fec1fe bellard
    cpu_fprintf(f, "\nStatistics:\n");
3773 e3db7226 bellard
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3774 e3db7226 bellard
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3775 e3db7226 bellard
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3776 b67d9a52 bellard
    tcg_dump_info(f, cpu_fprintf);
3777 e3db7226 bellard
}
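
/* Illustrative sketch (editor's addition, not part of exec.c): dump_exec_info()
   only needs a FILE-style sink and a matching fprintf-like callback, so plain
   libc fprintf() is enough for ad-hoc debugging; a monitor would typically pass
   its own printer instead.  The wrapper name is hypothetical. */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
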
3778 e3db7226 bellard
3779 5fafdf24 ths
#if !defined(CONFIG_USER_ONLY)
3780 61382a50 bellard
3781 61382a50 bellard
#define MMUSUFFIX _cmmu
3782 61382a50 bellard
#define GETPC() NULL
3783 61382a50 bellard
#define env cpu_single_env
3784 b769d8fe bellard
#define SOFTMMU_CODE_ACCESS
3785 61382a50 bellard
3786 61382a50 bellard
#define SHIFT 0
3787 61382a50 bellard
#include "softmmu_template.h"
3788 61382a50 bellard
3789 61382a50 bellard
#define SHIFT 1
3790 61382a50 bellard
#include "softmmu_template.h"
3791 61382a50 bellard
3792 61382a50 bellard
#define SHIFT 2
3793 61382a50 bellard
#include "softmmu_template.h"
3794 61382a50 bellard
3795 61382a50 bellard
#define SHIFT 3
3796 61382a50 bellard
#include "softmmu_template.h"
3797 61382a50 bellard
3798 61382a50 bellard
#undef env
3799 61382a50 bellard
3800 61382a50 bellard
#endif