/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

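/* Pool of translation blocks (allocated in code_gen_alloc), the hash
   table used to find a TB from the physical PC of its first
   instruction, and the number of TBs currently in use. */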
static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

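/* Make the host pages containing [addr, addr + size) executable. */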
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

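/* Compute the host page size, bits and mask, allocate the physical page
   descriptor table, and (user mode only) mark every region already
   mapped by the host process as PAGE_RESERVED. */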
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

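/* The PageDesc table is a two-level structure: l1_map[] holds L1_SIZE
   pointers to blocks of L2_SIZE PageDesc entries, indexed by target
   page number (index >> L2_BITS selects the L1 slot, the low L2_BITS
   select the entry within the block). */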
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

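/* Look up (and optionally allocate) the PhysPageDesc for a physical
   page index in l1_phys_map.  When the physical address space is wider
   than 32 bits an extra indirection level is used; newly allocated
   entries are initialized to IO_MEM_UNASSIGNED. */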
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

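/* Allocate the buffer that receives generated host code, then size the
   TB array from it.  On some hosts the buffer must live at a low or
   fixed address so that direct calls and branches can reach it. */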
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

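/* Register a newly created virtual CPU: give it the next free
   cpu_index, append it to the global CPU list and, for system
   emulation, register its savevm handlers. */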
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

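/* Drop the SMC code bitmap of a page and reset its write counter so
   the bitmap can be rebuilt on demand. */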
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

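/* The per-page and jump lists tag each pointer with a small index in
   its two low bits (0 or 1 for the page/jump slot, 2 terminates the
   list), hence the "& 3" / "& ~3" operations in the list walkers
   below. */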
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

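/* Remove a TB from every lookup structure: the physical hash table,
   the page lists, the per-CPU jump caches, and the chains of TBs that
   jump to it (those jumps are reset to their unchained state). */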
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

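/* Set bits [start, start + len) in the byte-addressed bitmap 'tab';
   for example set_bits(tab, 3, 10) marks bits 3..12. */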
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

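/* Build the code bitmap of a page: walk its TB list and mark every
   byte covered by translated code, taking into account that a TB may
   span two pages (n == 0 is the first page, n == 1 the second). */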
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

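/* Translate a new TB starting at 'pc' and register it in the physical
   page tables.  If the TB pool or the code buffer is exhausted,
   everything is flushed and translation is retried. */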
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

883
/* invalidate all TBs which intersect with the target physical page
884
   starting in range [start;end[. NOTE: start and end must refer to
885
   the same physical page. 'is_cpu_write_access' should be true if called
886
   from a real cpu write access: the virtual CPU will exit the current
887
   TB if code is modified inside this TB. */
888
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
889
                                   int is_cpu_write_access)
890
{
891
    TranslationBlock *tb, *tb_next, *saved_tb;
892
    CPUState *env = cpu_single_env;
893
    target_ulong tb_start, tb_end;
894
    PageDesc *p;
895
    int n;
896
#ifdef TARGET_HAS_PRECISE_SMC
897
    int current_tb_not_found = is_cpu_write_access;
898
    TranslationBlock *current_tb = NULL;
899
    int current_tb_modified = 0;
900
    target_ulong current_pc = 0;
901
    target_ulong current_cs_base = 0;
902
    int current_flags = 0;
903
#endif /* TARGET_HAS_PRECISE_SMC */
904

    
905
    p = page_find(start >> TARGET_PAGE_BITS);
906
    if (!p)
907
        return;
908
    if (!p->code_bitmap &&
909
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
910
        is_cpu_write_access) {
911
        /* build code bitmap */
912
        build_page_bitmap(p);
913
    }
914

    
915
    /* we remove all the TBs in the range [start, end[ */
916
    /* XXX: see if in some cases it could be faster to invalidate all the code */
917
    tb = p->first_tb;
918
    while (tb != NULL) {
919
        n = (long)tb & 3;
920
        tb = (TranslationBlock *)((long)tb & ~3);
921
        tb_next = tb->page_next[n];
922
        /* NOTE: this is subtle as a TB may span two physical pages */
923
        if (n == 0) {
924
            /* NOTE: tb_end may be after the end of the page, but
925
               it is not a problem */
926
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
927
            tb_end = tb_start + tb->size;
928
        } else {
929
            tb_start = tb->page_addr[1];
930
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
931
        }
932
        if (!(tb_end <= start || tb_start >= end)) {
933
#ifdef TARGET_HAS_PRECISE_SMC
934
            if (current_tb_not_found) {
935
                current_tb_not_found = 0;
936
                current_tb = NULL;
937
                if (env->mem_io_pc) {
938
                    /* now we have a real cpu fault */
939
                    current_tb = tb_find_pc(env->mem_io_pc);
940
                }
941
            }
942
            if (current_tb == tb &&
943
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
944
                /* If we are modifying the current TB, we must stop
945
                its execution. We could be more precise by checking
946
                that the modification is after the current PC, but it
947
                would require a specialized function to partially
948
                restore the CPU state */
949

    
950
                current_tb_modified = 1;
951
                cpu_restore_state(current_tb, env,
952
                                  env->mem_io_pc, NULL);
953
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
954
                                     &current_flags);
955
            }
956
#endif /* TARGET_HAS_PRECISE_SMC */
957
            /* we need to do that to handle the case where a signal
958
               occurs while doing tb_phys_invalidate() */
959
            saved_tb = NULL;
960
            if (env) {
961
                saved_tb = env->current_tb;
962
                env->current_tb = NULL;
963
            }
964
            tb_phys_invalidate(tb, -1);
965
            if (env) {
966
                env->current_tb = saved_tb;
967
                if (env->interrupt_request && env->current_tb)
968
                    cpu_interrupt(env, env->interrupt_request);
969
            }
970
        }
971
        tb = tb_next;
972
    }
973
#if !defined(CONFIG_USER_ONLY)
974
    /* if no code remaining, no need to continue to use slow writes */
975
    if (!p->first_tb) {
976
        invalidate_page_bitmap(p);
977
        if (is_cpu_write_access) {
978
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
979
        }
980
    }
981
#endif
982
#ifdef TARGET_HAS_PRECISE_SMC
983
    if (current_tb_modified) {
984
        /* we generate a block containing just the instruction
985
           modifying the memory. It will ensure that it cannot modify
986
           itself */
987
        env->current_tb = NULL;
988
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
989
        cpu_resume_from_signal(env, NULL);
990
    }
991
#endif
992
}
993

    
994
/* len must be <= 8 and start must be a multiple of len */
995
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
996
{
997
    PageDesc *p;
998
    int offset, b;
999
#if 0
1000
    if (1) {
1001
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1002
                  cpu_single_env->mem_io_vaddr, len,
1003
                  cpu_single_env->eip,
1004
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1005
    }
1006
#endif
1007
    p = page_find(start >> TARGET_PAGE_BITS);
1008
    if (!p)
1009
        return;
1010
    if (p->code_bitmap) {
1011
        offset = start & ~TARGET_PAGE_MASK;
1012
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1013
        if (b & ((1 << len) - 1))
1014
            goto do_invalidate;
1015
    } else {
1016
    do_invalidate:
1017
        tb_invalidate_phys_page_range(start, start + len, 1);
1018
    }
1019
}
1020

    
1021
#if !defined(CONFIG_SOFTMMU)
1022
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1023
                                    unsigned long pc, void *puc)
1024
{
1025
    TranslationBlock *tb;
1026
    PageDesc *p;
1027
    int n;
1028
#ifdef TARGET_HAS_PRECISE_SMC
1029
    TranslationBlock *current_tb = NULL;
1030
    CPUState *env = cpu_single_env;
1031
    int current_tb_modified = 0;
1032
    target_ulong current_pc = 0;
1033
    target_ulong current_cs_base = 0;
1034
    int current_flags = 0;
1035
#endif
1036

    
1037
    addr &= TARGET_PAGE_MASK;
1038
    p = page_find(addr >> TARGET_PAGE_BITS);
1039
    if (!p)
1040
        return;
1041
    tb = p->first_tb;
1042
#ifdef TARGET_HAS_PRECISE_SMC
1043
    if (tb && pc != 0) {
1044
        current_tb = tb_find_pc(pc);
1045
    }
1046
#endif
1047
    while (tb != NULL) {
1048
        n = (long)tb & 3;
1049
        tb = (TranslationBlock *)((long)tb & ~3);
1050
#ifdef TARGET_HAS_PRECISE_SMC
1051
        if (current_tb == tb &&
1052
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1053
                /* If we are modifying the current TB, we must stop
1054
                   its execution. We could be more precise by checking
1055
                   that the modification is after the current PC, but it
1056
                   would require a specialized function to partially
1057
                   restore the CPU state */
1058

    
1059
            current_tb_modified = 1;
1060
            cpu_restore_state(current_tb, env, pc, puc);
1061
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1062
                                 &current_flags);
1063
        }
1064
#endif /* TARGET_HAS_PRECISE_SMC */
1065
        tb_phys_invalidate(tb, addr);
1066
        tb = tb->page_next[n];
1067
    }
1068
    p->first_tb = NULL;
1069
#ifdef TARGET_HAS_PRECISE_SMC
1070
    if (current_tb_modified) {
1071
        /* we generate a block containing just the instruction
1072
           modifying the memory. It will ensure that it cannot modify
1073
           itself */
1074
        env->current_tb = NULL;
1075
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1076
        cpu_resume_from_signal(env, puc);
1077
    }
1078
#endif
1079
}
1080
#endif
1081

    
1082
/* add the tb in the target page and protect it if necessary */
1083
static inline void tb_alloc_page(TranslationBlock *tb,
1084
                                 unsigned int n, target_ulong page_addr)
1085
{
1086
    PageDesc *p;
1087
    TranslationBlock *last_first_tb;
1088

    
1089
    tb->page_addr[n] = page_addr;
1090
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1091
    tb->page_next[n] = p->first_tb;
1092
    last_first_tb = p->first_tb;
1093
    p->first_tb = (TranslationBlock *)((long)tb | n);
1094
    invalidate_page_bitmap(p);
1095

    
1096
#if defined(TARGET_HAS_SMC) || 1
1097

    
1098
#if defined(CONFIG_USER_ONLY)
1099
    if (p->flags & PAGE_WRITE) {
1100
        target_ulong addr;
1101
        PageDesc *p2;
1102
        int prot;
1103

    
1104
        /* force the host page as non writable (writes will have a
1105
           page fault + mprotect overhead) */
1106
        page_addr &= qemu_host_page_mask;
1107
        prot = 0;
1108
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1109
            addr += TARGET_PAGE_SIZE) {
1110

    
1111
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1112
            if (!p2)
1113
                continue;
1114
            prot |= p2->flags;
1115
            p2->flags &= ~PAGE_WRITE;
1116
            page_get_flags(addr);
1117
          }
1118
        mprotect(g2h(page_addr), qemu_host_page_size,
1119
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1120
#ifdef DEBUG_TB_INVALIDATE
1121
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1122
               page_addr);
1123
#endif
1124
    }
1125
#else
1126
    /* if some code is already present, then the pages are already
1127
       protected. So we handle the case where only the first TB is
1128
       allocated in a physical page */
1129
    if (!last_first_tb) {
1130
        tlb_protect_code(page_addr);
1131
    }
1132
#endif
1133

    
1134
#endif /* TARGET_HAS_SMC */
1135
}
1136

    
1137
/* Allocate a new translation block. Flush the translation buffer if
1138
   too many translation blocks or too much generated code. */
1139
TranslationBlock *tb_alloc(target_ulong pc)
1140
{
1141
    TranslationBlock *tb;
1142

    
1143
    if (nb_tbs >= code_gen_max_blocks ||
1144
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1145
        return NULL;
1146
    tb = &tbs[nb_tbs++];
1147
    tb->pc = pc;
1148
    tb->cflags = 0;
1149
    return tb;
1150
}
1151

    
1152
void tb_free(TranslationBlock *tb)
1153
{
1154
    /* In practice this is mostly used for single use temporary TB
1155
       Ignore the hard cases and just back up if this TB happens to
1156
       be the last one generated.  */
1157
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1158
        code_gen_ptr = tb->tc_ptr;
1159
        nb_tbs--;
1160
    }
1161
}
1162

    
1163
/* add a new TB and link it to the physical page tables. phys_page2 is
1164
   (-1) to indicate that only one page contains the TB. */
1165
void tb_link_phys(TranslationBlock *tb,
1166
                  target_ulong phys_pc, target_ulong phys_page2)
1167
{
1168
    unsigned int h;
1169
    TranslationBlock **ptb;
1170

    
1171
    /* Grab the mmap lock to stop another thread invalidating this TB
1172
       before we are done.  */
1173
    mmap_lock();
1174
    /* add in the physical hash table */
1175
    h = tb_phys_hash_func(phys_pc);
1176
    ptb = &tb_phys_hash[h];
1177
    tb->phys_hash_next = *ptb;
1178
    *ptb = tb;
1179

    
1180
    /* add in the page list */
1181
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1182
    if (phys_page2 != -1)
1183
        tb_alloc_page(tb, 1, phys_page2);
1184
    else
1185
        tb->page_addr[1] = -1;
1186

    
1187
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1188
    tb->jmp_next[0] = NULL;
1189
    tb->jmp_next[1] = NULL;
1190

    
1191
    /* init original jump addresses */
1192
    if (tb->tb_next_offset[0] != 0xffff)
1193
        tb_reset_jump(tb, 0);
1194
    if (tb->tb_next_offset[1] != 0xffff)
1195
        tb_reset_jump(tb, 1);
1196

    
1197
#ifdef DEBUG_TB_CHECK
1198
    tb_page_check();
1199
#endif
1200
    mmap_unlock();
1201
}
1202

    
1203
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1204
   tb[1].tc_ptr. Return NULL if not found */
1205
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1206
{
1207
    int m_min, m_max, m;
1208
    unsigned long v;
1209
    TranslationBlock *tb;
1210

    
1211
    if (nb_tbs <= 0)
1212
        return NULL;
1213
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1214
        tc_ptr >= (unsigned long)code_gen_ptr)
1215
        return NULL;
1216
    /* binary search (cf Knuth) */
1217
    m_min = 0;
1218
    m_max = nb_tbs - 1;
1219
    while (m_min <= m_max) {
1220
        m = (m_min + m_max) >> 1;
1221
        tb = &tbs[m];
1222
        v = (unsigned long)tb->tc_ptr;
1223
        if (v == tc_ptr)
1224
            return tb;
1225
        else if (tc_ptr < v) {
1226
            m_max = m - 1;
1227
        } else {
1228
            m_min = m + 1;
1229
        }
1230
    }
1231
    return &tbs[m_max];
1232
}
1233

    
1234
static void tb_reset_jump_recursive(TranslationBlock *tb);
1235

    
1236
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1237
{
1238
    TranslationBlock *tb1, *tb_next, **ptb;
1239
    unsigned int n1;
1240

    
1241
    tb1 = tb->jmp_next[n];
1242
    if (tb1 != NULL) {
1243
        /* find head of list */
1244
        for(;;) {
1245
            n1 = (long)tb1 & 3;
1246
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1247
            if (n1 == 2)
1248
                break;
1249
            tb1 = tb1->jmp_next[n1];
1250
        }
1251
        /* we are now sure now that tb jumps to tb1 */
1252
        tb_next = tb1;
1253

    
1254
        /* remove tb from the jmp_first list */
1255
        ptb = &tb_next->jmp_first;
1256
        for(;;) {
1257
            tb1 = *ptb;
1258
            n1 = (long)tb1 & 3;
1259
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1260
            if (n1 == n && tb1 == tb)
1261
                break;
1262
            ptb = &tb1->jmp_next[n1];
1263
        }
1264
        *ptb = tb->jmp_next[n];
1265
        tb->jmp_next[n] = NULL;
1266

    
1267
        /* suppress the jump to next tb in generated code */
1268
        tb_reset_jump(tb, n);
1269

    
1270
        /* suppress jumps in the tb on which we could have jumped */
1271
        tb_reset_jump_recursive(tb_next);
1272
    }
1273
}
1274

    
1275
static void tb_reset_jump_recursive(TranslationBlock *tb)
1276
{
1277
    tb_reset_jump_recursive2(tb, 0);
1278
    tb_reset_jump_recursive2(tb, 1);
1279
}
1280

    
1281
#if defined(TARGET_HAS_ICE)
1282
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1283
{
1284
    target_phys_addr_t addr;
1285
    target_ulong pd;
1286
    ram_addr_t ram_addr;
1287
    PhysPageDesc *p;
1288

    
1289
    addr = cpu_get_phys_page_debug(env, pc);
1290
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1291
    if (!p) {
1292
        pd = IO_MEM_UNASSIGNED;
1293
    } else {
1294
        pd = p->phys_offset;
1295
    }
1296
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1297
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1298
}
1299
#endif
1300

    
1301
/* Add a watchpoint.  */
1302
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1303
                          int flags, CPUWatchpoint **watchpoint)
1304
{
1305
    target_ulong len_mask = ~(len - 1);
1306
    CPUWatchpoint *wp;
1307

    
1308
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1309
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1310
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1311
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1312
        return -EINVAL;
1313
    }
1314
    wp = qemu_malloc(sizeof(*wp));
1315

    
1316
    wp->vaddr = addr;
1317
    wp->len_mask = len_mask;
1318
    wp->flags = flags;
1319

    
1320
    /* keep all GDB-injected watchpoints in front */
1321
    if (flags & BP_GDB)
1322
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1323
    else
1324
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1325

    
1326
    tlb_flush_page(env, addr);
1327

    
1328
    if (watchpoint)
1329
        *watchpoint = wp;
1330
    return 0;
1331
}
1332

    
1333
/* Remove a specific watchpoint.  */
1334
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1335
                          int flags)
1336
{
1337
    target_ulong len_mask = ~(len - 1);
1338
    CPUWatchpoint *wp;
1339

    
1340
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1341
        if (addr == wp->vaddr && len_mask == wp->len_mask
1342
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1343
            cpu_watchpoint_remove_by_ref(env, wp);
1344
            return 0;
1345
        }
1346
    }
1347
    return -ENOENT;
1348
}
1349

    
1350
/* Remove a specific watchpoint by reference.  */
1351
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1352
{
1353
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1354

    
1355
    tlb_flush_page(env, watchpoint->vaddr);
1356

    
1357
    qemu_free(watchpoint);
1358
}
1359

    
1360
/* Remove all matching watchpoints.  */
1361
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1362
{
1363
    CPUWatchpoint *wp, *next;
1364

    
1365
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1366
        if (wp->flags & mask)
1367
            cpu_watchpoint_remove_by_ref(env, wp);
1368
    }
1369
}
1370

    
1371
/* Add a breakpoint.  */
1372
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1373
                          CPUBreakpoint **breakpoint)
1374
{
1375
#if defined(TARGET_HAS_ICE)
1376
    CPUBreakpoint *bp;
1377

    
1378
    bp = qemu_malloc(sizeof(*bp));
1379

    
1380
    bp->pc = pc;
1381
    bp->flags = flags;
1382

    
1383
    /* keep all GDB-injected breakpoints in front */
1384
    if (flags & BP_GDB)
1385
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1386
    else
1387
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1388

    
1389
    breakpoint_invalidate(env, pc);
1390

    
1391
    if (breakpoint)
1392
        *breakpoint = bp;
1393
    return 0;
1394
#else
1395
    return -ENOSYS;
1396
#endif
1397
}
1398

    
1399
/* Remove a specific breakpoint.  */
1400
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1401
{
1402
#if defined(TARGET_HAS_ICE)
1403
    CPUBreakpoint *bp;
1404

    
1405
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1406
        if (bp->pc == pc && bp->flags == flags) {
1407
            cpu_breakpoint_remove_by_ref(env, bp);
1408
            return 0;
1409
        }
1410
    }
1411
    return -ENOENT;
1412
#else
1413
    return -ENOSYS;
1414
#endif
1415
}
1416

    
1417
/* Remove a specific breakpoint by reference.  */
1418
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1419
{
1420
#if defined(TARGET_HAS_ICE)
1421
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1422

    
1423
    breakpoint_invalidate(env, breakpoint->pc);
1424

    
1425
    qemu_free(breakpoint);
1426
#endif
1427
}
1428

    
1429
/* Remove all matching breakpoints. */
1430
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1431
{
1432
#if defined(TARGET_HAS_ICE)
1433
    CPUBreakpoint *bp, *next;
1434

    
1435
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1436
        if (bp->flags & mask)
1437
            cpu_breakpoint_remove_by_ref(env, bp);
1438
    }
1439
#endif
1440
}
1441

    
1442
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1443
   CPU loop after each instruction */
1444
void cpu_single_step(CPUState *env, int enabled)
1445
{
1446
#if defined(TARGET_HAS_ICE)
1447
    if (env->singlestep_enabled != enabled) {
1448
        env->singlestep_enabled = enabled;
1449
        /* must flush all the translated code to avoid inconsistancies */
1450
        /* XXX: only flush what is necessary */
1451
        tb_flush(env);
1452
    }
1453
#endif
1454
}
1455

    
1456
/* enable or disable low levels log */
1457
void cpu_set_log(int log_flags)
1458
{
1459
    loglevel = log_flags;
1460
    if (loglevel && !logfile) {
1461
        logfile = fopen(logfilename, log_append ? "a" : "w");
1462
        if (!logfile) {
1463
            perror(logfilename);
1464
            _exit(1);
1465
        }
1466
#if !defined(CONFIG_SOFTMMU)
1467
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1468
        {
1469
            static char logfile_buf[4096];
1470
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1471
        }
1472
#else
1473
        setvbuf(logfile, NULL, _IOLBF, 0);
1474
#endif
1475
        log_append = 1;
1476
    }
1477
    if (!loglevel && logfile) {
1478
        fclose(logfile);
1479
        logfile = NULL;
1480
    }
1481
}
1482

    
1483
void cpu_set_log_filename(const char *filename)
1484
{
1485
    logfilename = strdup(filename);
1486
    if (logfile) {
1487
        fclose(logfile);
1488
        logfile = NULL;
1489
    }
1490
    cpu_set_log(loglevel);
1491
}
1492

    
1493
/* mask must never be zero, except for A20 change call */
1494
void cpu_interrupt(CPUState *env, int mask)
1495
{
1496
#if !defined(USE_NPTL)
1497
    TranslationBlock *tb;
1498
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1499
#endif
1500
    int old_mask;
1501

    
1502
    old_mask = env->interrupt_request;
1503
    /* FIXME: This is probably not threadsafe.  A different thread could
1504
       be in the middle of a read-modify-write operation.  */
1505
    env->interrupt_request |= mask;
1506
#if defined(USE_NPTL)
1507
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1508
       problem and hope the cpu will stop of its own accord.  For userspace
1509
       emulation this often isn't actually as bad as it sounds.  Often
1510
       signals are used primarily to interrupt blocking syscalls.  */
1511
#else
1512
    if (use_icount) {
1513
        env->icount_decr.u16.high = 0xffff;
1514
#ifndef CONFIG_USER_ONLY
1515
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1516
           an async event happened and we need to process it.  */
1517
        if (!can_do_io(env)
1518
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1519
            cpu_abort(env, "Raised interrupt while not in I/O function");
1520
        }
1521
#endif
1522
    } else {
1523
        tb = env->current_tb;
1524
        /* if the cpu is currently executing code, we must unlink it and
1525
           all the potentially executing TB */
1526
        if (tb && !testandset(&interrupt_lock)) {
1527
            env->current_tb = NULL;
1528
            tb_reset_jump_recursive(tb);
1529
            resetlock(&interrupt_lock);
1530
        }
1531
    }
1532
#endif
1533
}
1534

    
1535
void cpu_reset_interrupt(CPUState *env, int mask)
1536
{
1537
    env->interrupt_request &= ~mask;
1538
}
1539

    
1540
const CPULogItem cpu_log_items[] = {
1541
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1542
      "show generated host assembly code for each compiled TB" },
1543
    { CPU_LOG_TB_IN_ASM, "in_asm",
1544
      "show target assembly code for each compiled TB" },
1545
    { CPU_LOG_TB_OP, "op",
1546
      "show micro ops for each compiled TB" },
1547
    { CPU_LOG_TB_OP_OPT, "op_opt",
1548
      "show micro ops "
1549
#ifdef TARGET_I386
1550
      "before eflags optimization and "
1551
#endif
1552
      "after liveness analysis" },
1553
    { CPU_LOG_INT, "int",
1554
      "show interrupts/exceptions in short format" },
1555
    { CPU_LOG_EXEC, "exec",
1556
      "show trace before each executed TB (lots of logs)" },
1557
    { CPU_LOG_TB_CPU, "cpu",
1558
      "show CPU state before block translation" },
1559
#ifdef TARGET_I386
1560
    { CPU_LOG_PCALL, "pcall",
1561
      "show protected mode far calls/returns/exceptions" },
1562
    { CPU_LOG_RESET, "cpu_reset",
1563
      "show CPU state before CPU resets" },
1564
#endif
1565
#ifdef DEBUG_IOPORT
1566
    { CPU_LOG_IOPORT, "ioport",
1567
      "show all i/o ports accesses" },
1568
#endif
1569
    { 0, NULL, NULL },
1570
};
1571

    
1572
static int cmp1(const char *s1, int n, const char *s2)
1573
{
1574
    if (strlen(s2) != n)
1575
        return 0;
1576
    return memcmp(s1, s2, n) == 0;
1577
}
1578

    
1579
/* takes a comma separated list of log masks. Return 0 if error. */
1580
int cpu_str_to_log_mask(const char *str)
1581
{
1582
    const CPULogItem *item;
1583
    int mask;
1584
    const char *p, *p1;
1585

    
1586
    p = str;
1587
    mask = 0;
1588
    for(;;) {
1589
        p1 = strchr(p, ',');
1590
        if (!p1)
1591
            p1 = p + strlen(p);
1592
        if(cmp1(p,p1-p,"all")) {
1593
                for(item = cpu_log_items; item->mask != 0; item++) {
1594
                        mask |= item->mask;
1595
                }
1596
        } else {
1597
        for(item = cpu_log_items; item->mask != 0; item++) {
1598
            if (cmp1(p, p1 - p, item->name))
1599
                goto found;
1600
        }
1601
        return 0;
1602
        }
1603
    found:
1604
        mask |= item->mask;
1605
        if (*p1 != ',')
1606
            break;
1607
        p = p1 + 1;
1608
    }
1609
    return mask;
1610
}
1611

    
1612
void cpu_abort(CPUState *env, const char *fmt, ...)
1613
{
1614
    va_list ap;
1615
    va_list ap2;
1616

    
1617
    va_start(ap, fmt);
1618
    va_copy(ap2, ap);
1619
    fprintf(stderr, "qemu: fatal: ");
1620
    vfprintf(stderr, fmt, ap);
1621
    fprintf(stderr, "\n");
1622
#ifdef TARGET_I386
1623
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1624
#else
1625
    cpu_dump_state(env, stderr, fprintf, 0);
1626
#endif
1627
    if (qemu_log_enabled()) {
1628
        qemu_log("qemu: fatal: ");
1629
        qemu_log_vprintf(fmt, ap2);
1630
        qemu_log("\n");
1631
#ifdef TARGET_I386
1632
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1633
#else
1634
        log_cpu_state(env, 0);
1635
#endif
1636
        qemu_log_flush();
1637
        qemu_log_close();
1638
    }
1639
    va_end(ap2);
1640
    va_end(ap);
1641
    abort();
1642
}
1643

    
1644
CPUState *cpu_copy(CPUState *env)
1645
{
1646
    CPUState *new_env = cpu_init(env->cpu_model_str);
1647
    CPUState *next_cpu = new_env->next_cpu;
1648
    int cpu_index = new_env->cpu_index;
1649
#if defined(TARGET_HAS_ICE)
1650
    CPUBreakpoint *bp;
1651
    CPUWatchpoint *wp;
1652
#endif
1653

    
1654
    memcpy(new_env, env, sizeof(CPUState));
1655

    
1656
    /* Preserve chaining and index. */
1657
    new_env->next_cpu = next_cpu;
1658
    new_env->cpu_index = cpu_index;
1659

    
1660
    /* Clone all break/watchpoints.
1661
       Note: Once we support ptrace with hw-debug register access, make sure
1662
       BP_CPU break/watchpoints are handled correctly on clone. */
1663
    TAILQ_INIT(&env->breakpoints);
1664
    TAILQ_INIT(&env->watchpoints);
1665
#if defined(TARGET_HAS_ICE)
1666
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1667
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1668
    }
1669
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1670
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1671
                              wp->flags, NULL);
1672
    }
1673
#endif
1674

    
1675
    return new_env;
1676
}
1677

    
1678
#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
1696
   implemented yet) */
1697
void tlb_flush(CPUState *env, int flush_global)
1698
{
1699
    int i;
1700

    
1701
#if defined(DEBUG_TLB)
1702
    printf("tlb_flush:\n");
1703
#endif
1704
    /* must reset current TB so that interrupts cannot modify the
1705
       links while we are modifying them */
1706
    env->current_tb = NULL;
1707

    
1708
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1709
        env->tlb_table[0][i].addr_read = -1;
1710
        env->tlb_table[0][i].addr_write = -1;
1711
        env->tlb_table[0][i].addr_code = -1;
1712
        env->tlb_table[1][i].addr_read = -1;
1713
        env->tlb_table[1][i].addr_write = -1;
1714
        env->tlb_table[1][i].addr_code = -1;
1715
#if (NB_MMU_MODES >= 3)
1716
        env->tlb_table[2][i].addr_read = -1;
1717
        env->tlb_table[2][i].addr_write = -1;
1718
        env->tlb_table[2][i].addr_code = -1;
1719
#if (NB_MMU_MODES == 4)
1720
        env->tlb_table[3][i].addr_read = -1;
1721
        env->tlb_table[3][i].addr_write = -1;
1722
        env->tlb_table[3][i].addr_code = -1;
1723
#endif
1724
#endif
1725
    }
1726

    
1727
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1728

    
1729
#ifdef USE_KQEMU
1730
    if (env->kqemu_enabled) {
1731
        kqemu_flush(env, flush_global);
1732
    }
1733
#endif
1734
    tlb_flush_count++;
1735
}
1736

    
1737
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1738
{
1739
    if (addr == (tlb_entry->addr_read &
1740
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1741
        addr == (tlb_entry->addr_write &
1742
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1743
        addr == (tlb_entry->addr_code &
1744
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1745
        tlb_entry->addr_read = -1;
1746
        tlb_entry->addr_write = -1;
1747
        tlb_entry->addr_code = -1;
1748
    }
1749
}
1750

    
1751
void tlb_flush_page(CPUState *env, target_ulong addr)
1752
{
1753
    int i;
1754

    
1755
#if defined(DEBUG_TLB)
1756
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1757
#endif
1758
    /* must reset current TB so that interrupts cannot modify the
1759
       links while we are modifying them */
1760
    env->current_tb = NULL;
1761

    
1762
    addr &= TARGET_PAGE_MASK;
1763
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1764
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1765
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1766
#if (NB_MMU_MODES >= 3)
1767
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1768
#if (NB_MMU_MODES == 4)
1769
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1770
#endif
1771
#endif
1772

    
1773
    tlb_flush_jmp_cache(env, addr);
1774

    
1775
#ifdef USE_KQEMU
1776
    if (env->kqemu_enabled) {
1777
        kqemu_flush_page(env, addr);
1778
    }
1779
#endif
1780
}
1781

    
1782
/* update the TLBs so that writes to code in the virtual page 'addr'
1783
   can be detected */
1784
static void tlb_protect_code(ram_addr_t ram_addr)
1785
{
1786
    cpu_physical_memory_reset_dirty(ram_addr,
1787
                                    ram_addr + TARGET_PAGE_SIZE,
1788
                                    CODE_DIRTY_FLAG);
1789
}
1790

    
1791
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1792
   tested for self modifying code */
1793
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1794
                                    target_ulong vaddr)
1795
{
1796
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1797
}
1798

    
1799
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1800
                                         unsigned long start, unsigned long length)
1801
{
1802
    unsigned long addr;
1803
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1804
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1805
        if ((addr - start) < length) {
1806
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1807
        }
1808
    }
1809
}
1810

    
1811
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1812
                                     int dirty_flags)
1813
{
1814
    CPUState *env;
1815
    unsigned long length, start1;
1816
    int i, mask, len;
1817
    uint8_t *p;
1818

    
1819
    start &= TARGET_PAGE_MASK;
1820
    end = TARGET_PAGE_ALIGN(end);
1821

    
1822
    length = end - start;
1823
    if (length == 0)
1824
        return;
1825
    len = length >> TARGET_PAGE_BITS;
1826
#ifdef USE_KQEMU
1827
    /* XXX: should not depend on cpu context */
1828
    env = first_cpu;
1829
    if (env->kqemu_enabled) {
1830
        ram_addr_t addr;
1831
        addr = start;
1832
        for(i = 0; i < len; i++) {
1833
            kqemu_set_notdirty(env, addr);
1834
            addr += TARGET_PAGE_SIZE;
1835
        }
1836
    }
1837
#endif
1838
    mask = ~dirty_flags;
1839
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1840
    for(i = 0; i < len; i++)
1841
        p[i] &= mask;
1842

    
1843
    /* we modify the TLB cache so that the dirty bit will be set again
1844
       when accessing the range */
1845
    start1 = start + (unsigned long)phys_ram_base;
1846
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1847
        for(i = 0; i < CPU_TLB_SIZE; i++)
1848
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1849
        for(i = 0; i < CPU_TLB_SIZE; i++)
1850
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1851
#if (NB_MMU_MODES >= 3)
1852
        for(i = 0; i < CPU_TLB_SIZE; i++)
1853
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1854
#if (NB_MMU_MODES == 4)
1855
        for(i = 0; i < CPU_TLB_SIZE; i++)
1856
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1857
#endif
1858
#endif
1859
    }
1860
}
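/* Illustrative use of the dirty bitmap (hypothetical display or migration
   client, not part of this file): scan guest pages with
   cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG), process the pages
   found dirty, then call
   cpu_physical_memory_reset_dirty(start, end, VGA_DIRTY_FLAG) so that the
   TLB write entries patched above trap and set the flag again on the next
   guest write. */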
int cpu_physical_memory_set_dirty_tracking(int enable)
1863
{
1864
    in_migration = enable;
1865
    return 0;
1866
}
1867

    
1868
int cpu_physical_memory_get_dirty_tracking(void)
1869
{
1870
    return in_migration;
1871
}
1872

    
1873
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1874
{
1875
    if (kvm_enabled())
1876
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1877
}
1878

    
1879
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1880
{
1881
    ram_addr_t ram_addr;
1882

    
1883
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1884
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1885
            tlb_entry->addend - (unsigned long)phys_ram_base;
1886
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1887
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1888
        }
1889
    }
1890
}
1891

    
1892
/* update the TLB according to the current state of the dirty bits */
1893
void cpu_tlb_update_dirty(CPUState *env)
1894
{
1895
    int i;
1896
    for(i = 0; i < CPU_TLB_SIZE; i++)
1897
        tlb_update_dirty(&env->tlb_table[0][i]);
1898
    for(i = 0; i < CPU_TLB_SIZE; i++)
1899
        tlb_update_dirty(&env->tlb_table[1][i]);
1900
#if (NB_MMU_MODES >= 3)
1901
    for(i = 0; i < CPU_TLB_SIZE; i++)
1902
        tlb_update_dirty(&env->tlb_table[2][i]);
1903
#if (NB_MMU_MODES == 4)
1904
    for(i = 0; i < CPU_TLB_SIZE; i++)
1905
        tlb_update_dirty(&env->tlb_table[3][i]);
1906
#endif
1907
#endif
1908
}
1909

    
1910
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1911
{
1912
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1913
        tlb_entry->addr_write = vaddr;
1914
}
1915

    
1916
/* update the TLB corresponding to virtual page vaddr
1917
   so that it is no longer dirty */
1918
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1919
{
1920
    int i;
1921

    
1922
    vaddr &= TARGET_PAGE_MASK;
1923
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1924
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1925
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1926
#if (NB_MMU_MODES >= 3)
1927
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1928
#if (NB_MMU_MODES == 4)
1929
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1930
#endif
1931
#endif
1932
}
1933

    
1934
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1939
                      target_phys_addr_t paddr, int prot,
1940
                      int mmu_idx, int is_softmmu)
1941
{
1942
    PhysPageDesc *p;
1943
    unsigned long pd;
1944
    unsigned int index;
1945
    target_ulong address;
1946
    target_ulong code_address;
1947
    target_phys_addr_t addend;
1948
    int ret;
1949
    CPUTLBEntry *te;
1950
    CPUWatchpoint *wp;
1951
    target_phys_addr_t iotlb;
1952

    
1953
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1954
    if (!p) {
1955
        pd = IO_MEM_UNASSIGNED;
1956
    } else {
1957
        pd = p->phys_offset;
1958
    }
1959
#if defined(DEBUG_TLB)
1960
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1961
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1962
#endif
1963

    
1964
    ret = 0;
1965
    address = vaddr;
1966
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1967
        /* IO memory case (romd handled later) */
1968
        address |= TLB_MMIO;
1969
    }
1970
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1971
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1972
        /* Normal RAM.  */
1973
        iotlb = pd & TARGET_PAGE_MASK;
1974
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1975
            iotlb |= IO_MEM_NOTDIRTY;
1976
        else
1977
            iotlb |= IO_MEM_ROM;
1978
    } else {
1979
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
1986
        if (p) {
1987
            iotlb += p->region_offset;
1988
        } else {
1989
            iotlb += paddr;
1990
        }
1991
    }
1992

    
1993
    code_address = address;
1994
    /* Make accesses to pages with watchpoints go via the
1995
       watchpoint trap routines.  */
1996
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1997
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1998
            iotlb = io_mem_watch + paddr;
1999
            /* TODO: The memory case can be optimized by not trapping
2000
               reads of pages with a write breakpoint.  */
2001
            address |= TLB_MMIO;
2002
        }
2003
    }
2004

    
2005
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2006
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2007
    te = &env->tlb_table[mmu_idx][index];
2008
    te->addend = addend - vaddr;
2009
    if (prot & PAGE_READ) {
2010
        te->addr_read = address;
2011
    } else {
2012
        te->addr_read = -1;
2013
    }
2014

    
2015
    if (prot & PAGE_EXEC) {
2016
        te->addr_code = code_address;
2017
    } else {
2018
        te->addr_code = -1;
2019
    }
2020
    if (prot & PAGE_WRITE) {
2021
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2022
            (pd & IO_MEM_ROMD)) {
2023
            /* Write access calls the I/O callback.  */
2024
            te->addr_write = address | TLB_MMIO;
2025
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2026
                   !cpu_physical_memory_is_dirty(pd)) {
2027
            te->addr_write = address | TLB_NOTDIRTY;
2028
        } else {
2029
            te->addr_write = address;
2030
        }
2031
    } else {
2032
        te->addr_write = -1;
2033
    }
2034
    return ret;
2035
}
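/* Illustrative call site (sketch only): a target's MMU fault handler
   typically resolves vaddr to (paddr, prot) and then fills the TLB with

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK, prot, mmu_idx, 1);

   so that later accesses to the page hit the softmmu TLB directly. */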
#else
2038

    
2039
void tlb_flush(CPUState *env, int flush_global)
2040
{
2041
}
2042

    
2043
void tlb_flush_page(CPUState *env, target_ulong addr)
2044
{
2045
}
2046

    
2047
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2048
                      target_phys_addr_t paddr, int prot,
2049
                      int mmu_idx, int is_softmmu)
2050
{
2051
    return 0;
2052
}
2053

    
2054
/* dump memory mappings */
2055
void page_dump(FILE *f)
2056
{
2057
    unsigned long start, end;
2058
    int i, j, prot, prot1;
2059
    PageDesc *p;
2060

    
2061
    fprintf(f, "%-8s %-8s %-8s %s\n",
2062
            "start", "end", "size", "prot");
2063
    start = -1;
2064
    end = -1;
2065
    prot = 0;
2066
    for(i = 0; i <= L1_SIZE; i++) {
2067
        if (i < L1_SIZE)
2068
            p = l1_map[i];
2069
        else
2070
            p = NULL;
2071
        for(j = 0;j < L2_SIZE; j++) {
2072
            if (!p)
2073
                prot1 = 0;
2074
            else
2075
                prot1 = p[j].flags;
2076
            if (prot1 != prot) {
2077
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2078
                if (start != -1) {
2079
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2080
                            start, end, end - start,
2081
                            prot & PAGE_READ ? 'r' : '-',
2082
                            prot & PAGE_WRITE ? 'w' : '-',
2083
                            prot & PAGE_EXEC ? 'x' : '-');
2084
                }
2085
                if (prot1 != 0)
2086
                    start = end;
2087
                else
2088
                    start = -1;
2089
                prot = prot1;
2090
            }
2091
            if (!p)
2092
                break;
2093
        }
2094
    }
2095
}
2096

    
2097
int page_get_flags(target_ulong address)
2098
{
2099
    PageDesc *p;
2100

    
2101
    p = page_find(address >> TARGET_PAGE_BITS);
2102
    if (!p)
2103
        return 0;
2104
    return p->flags;
2105
}
2106

    
2107
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
2111
{
2112
    PageDesc *p;
2113
    target_ulong addr;
2114

    
2115
    /* mmap_lock should already be held.  */
2116
    start = start & TARGET_PAGE_MASK;
2117
    end = TARGET_PAGE_ALIGN(end);
2118
    if (flags & PAGE_WRITE)
2119
        flags |= PAGE_WRITE_ORG;
2120
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2121
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2122
        /* We may be called for host regions that are outside guest
2123
           address space.  */
2124
        if (!p)
2125
            return;
2126
        /* if the write protection is set, then we invalidate the code
2127
           inside */
2128
        if (!(p->flags & PAGE_WRITE) &&
2129
            (flags & PAGE_WRITE) &&
2130
            p->first_tb) {
2131
            tb_invalidate_phys_page(addr, 0, NULL);
2132
        }
2133
        p->flags = flags;
2134
    }
2135
}
2136

    
2137
int page_check_range(target_ulong start, target_ulong len, int flags)
2138
{
2139
    PageDesc *p;
2140
    target_ulong end;
2141
    target_ulong addr;
2142

    
2143
    if (start + len < start)
2144
        /* we've wrapped around */
2145
        return -1;
2146

    
2147
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
2149

    
2150
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2151
        p = page_find(addr >> TARGET_PAGE_BITS);
2152
        if( !p )
2153
            return -1;
2154
        if( !(p->flags & PAGE_VALID) )
2155
            return -1;
2156

    
2157
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2158
            return -1;
2159
        if (flags & PAGE_WRITE) {
2160
            if (!(p->flags & PAGE_WRITE_ORG))
2161
                return -1;
2162
            /* unprotect the page if it was put read-only because it
2163
               contains translated code */
2164
            if (!(p->flags & PAGE_WRITE)) {
2165
                if (!page_unprotect(addr, 0, NULL))
2166
                    return -1;
2167
            }
2168
            return 0;
2169
        }
2170
    }
2171
    return 0;
2172
}
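/* Illustrative use of page_check_range() (hypothetical user-mode caller,
   not part of this file): before touching a guest buffer, a syscall helper
   can check

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) != 0)
           goto efault;

   which, for writes, also unprotects pages that were made read-only to
   guard translated code. */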
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2177
{
2178
    unsigned int page_index, prot, pindex;
2179
    PageDesc *p, *p1;
2180
    target_ulong host_start, host_end, addr;
2181

    
2182
    /* Technically this isn't safe inside a signal handler.  However we
2183
       know this only ever happens in a synchronous SEGV handler, so in
2184
       practice it seems to be ok.  */
2185
    mmap_lock();
2186

    
2187
    host_start = address & qemu_host_page_mask;
2188
    page_index = host_start >> TARGET_PAGE_BITS;
2189
    p1 = page_find(page_index);
2190
    if (!p1) {
2191
        mmap_unlock();
2192
        return 0;
2193
    }
2194
    host_end = host_start + qemu_host_page_size;
2195
    p = p1;
2196
    prot = 0;
2197
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2198
        prot |= p->flags;
2199
        p++;
2200
    }
2201
    /* if the page was really writable, then we change its
2202
       protection back to writable */
2203
    if (prot & PAGE_WRITE_ORG) {
2204
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2205
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2206
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2207
                     (prot & PAGE_BITS) | PAGE_WRITE);
2208
            p1[pindex].flags |= PAGE_WRITE;
2209
            /* and since the content will be modified, we must invalidate
2210
               the corresponding translated code. */
2211
            tb_invalidate_phys_page(address, pc, puc);
2212
#ifdef DEBUG_TB_CHECK
2213
            tb_invalidate_check(address);
2214
#endif
2215
            mmap_unlock();
2216
            return 1;
2217
        }
2218
    }
2219
    mmap_unlock();
2220
    return 0;
2221
}
2222

    
2223
static inline void tlb_set_dirty(CPUState *env,
2224
                                 unsigned long addr, target_ulong vaddr)
2225
{
2226
}
2227
#endif /* defined(CONFIG_USER_ONLY) */
2228

    
2229
#if !defined(CONFIG_USER_ONLY)
2230

    
2231
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2232
                             ram_addr_t memory, ram_addr_t region_offset);
2233
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2234
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2235
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2236
                      need_subpage)                                     \
2237
    do {                                                                \
2238
        if (addr > start_addr)                                          \
2239
            start_addr2 = 0;                                            \
2240
        else {                                                          \
2241
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2242
            if (start_addr2 > 0)                                        \
2243
                need_subpage = 1;                                       \
2244
        }                                                               \
2245
                                                                        \
2246
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2247
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2248
        else {                                                          \
2249
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2250
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2251
                need_subpage = 1;                                       \
2252
        }                                                               \
2253
    } while (0)
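/* CHECK_SUBPAGE computes, for the target page at 'addr', the first and last
   byte offsets (start_addr2/end_addr2) that the registered range
   [start_addr, start_addr + orig_size) actually covers inside that page,
   and sets need_subpage when the range does not span the whole page, i.e.
   when per-subpage dispatch is required. */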
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2263
                                         ram_addr_t size,
2264
                                         ram_addr_t phys_offset,
2265
                                         ram_addr_t region_offset)
2266
{
2267
    target_phys_addr_t addr, end_addr;
2268
    PhysPageDesc *p;
2269
    CPUState *env;
2270
    ram_addr_t orig_size = size;
2271
    void *subpage;
2272

    
2273
#ifdef USE_KQEMU
2274
    /* XXX: should not depend on cpu context */
2275
    env = first_cpu;
2276
    if (env->kqemu_enabled) {
2277
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2278
    }
2279
#endif
2280
    if (kvm_enabled())
2281
        kvm_set_phys_mem(start_addr, size, phys_offset);
2282

    
2283
    region_offset &= TARGET_PAGE_MASK;
2284
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2285
    end_addr = start_addr + (target_phys_addr_t)size;
2286
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2287
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2288
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2289
            ram_addr_t orig_memory = p->phys_offset;
2290
            target_phys_addr_t start_addr2, end_addr2;
2291
            int need_subpage = 0;
2292

    
2293
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2294
                          need_subpage);
2295
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2296
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2297
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2298
                                           &p->phys_offset, orig_memory,
2299
                                           p->region_offset);
2300
                } else {
2301
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2302
                                            >> IO_MEM_SHIFT];
2303
                }
2304
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2305
                                 region_offset);
2306
                p->region_offset = 0;
2307
            } else {
2308
                p->phys_offset = phys_offset;
2309
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2310
                    (phys_offset & IO_MEM_ROMD))
2311
                    phys_offset += TARGET_PAGE_SIZE;
2312
            }
2313
        } else {
2314
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2315
            p->phys_offset = phys_offset;
2316
            p->region_offset = region_offset;
2317
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2318
                (phys_offset & IO_MEM_ROMD)) {
2319
                phys_offset += TARGET_PAGE_SIZE;
2320
            } else {
2321
                target_phys_addr_t start_addr2, end_addr2;
2322
                int need_subpage = 0;
2323

    
2324
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2325
                              end_addr2, need_subpage);
2326

    
2327
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2328
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2329
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2330
                                           0);
2331
                    subpage_register(subpage, start_addr2, end_addr2,
2332
                                     phys_offset, region_offset);
2333
                    p->region_offset = 0;
2334
                }
2335
            }
2336
        }
2337
        region_offset += TARGET_PAGE_SIZE;
2338
    }
2339

    
2340
    /* since each CPU stores ram addresses in its TLB cache, we must
2341
       reset the modified entries */
2342
    /* XXX: slow ! */
2343
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2344
        tlb_flush(env, 1);
2345
    }
2346
}
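/* Illustrative registration of an MMIO window (hypothetical board code with
   made-up addresses, not part of this file):

       io = cpu_register_io_memory(0, my_read, my_write, s);
       cpu_register_physical_memory_offset(0xfe000000, 0x1000, io, 0x1000);

   With a non-zero region_offset the device callbacks see addresses starting
   at 0x1000 rather than 0, so one io_index can back several windows of the
   same device. */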
/* XXX: temporary until new memory mapping API */
2349
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2350
{
2351
    PhysPageDesc *p;
2352

    
2353
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2354
    if (!p)
2355
        return IO_MEM_UNASSIGNED;
2356
    return p->phys_offset;
2357
}
2358

    
2359
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2360
{
2361
    if (kvm_enabled())
2362
        kvm_coalesce_mmio_region(addr, size);
2363
}
2364

    
2365
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2366
{
2367
    if (kvm_enabled())
2368
        kvm_uncoalesce_mmio_region(addr, size);
2369
}
2370

    
2371
/* XXX: better than nothing */
2372
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2373
{
2374
    ram_addr_t addr;
2375
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2376
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2377
                (uint64_t)size, (uint64_t)phys_ram_size);
2378
        abort();
2379
    }
2380
    addr = phys_ram_alloc_offset;
2381
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2382
    return addr;
2383
}
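/* Illustrative RAM setup (hypothetical board code, not part of this file):

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

   qemu_ram_alloc() only hands out offsets inside the preallocated
   phys_ram_base arena; the returned offset is what boards pass to
   cpu_register_physical_memory() as phys_offset. */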
void qemu_ram_free(ram_addr_t addr)
2386
{
2387
}
2388

    
2389
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2390
{
2391
#ifdef DEBUG_UNASSIGNED
2392
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2393
#endif
2394
#if defined(TARGET_SPARC)
2395
    do_unassigned_access(addr, 0, 0, 0, 1);
2396
#endif
2397
    return 0;
2398
}
2399

    
2400
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2401
{
2402
#ifdef DEBUG_UNASSIGNED
2403
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2404
#endif
2405
#if defined(TARGET_SPARC)
2406
    do_unassigned_access(addr, 0, 0, 0, 2);
2407
#endif
2408
    return 0;
2409
}
2410

    
2411
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2412
{
2413
#ifdef DEBUG_UNASSIGNED
2414
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2415
#endif
2416
#if defined(TARGET_SPARC)
2417
    do_unassigned_access(addr, 0, 0, 0, 4);
2418
#endif
2419
    return 0;
2420
}
2421

    
2422
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2423
{
2424
#ifdef DEBUG_UNASSIGNED
2425
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2426
#endif
2427
#if defined(TARGET_SPARC)
2428
    do_unassigned_access(addr, 1, 0, 0, 1);
2429
#endif
2430
}
2431

    
2432
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2433
{
2434
#ifdef DEBUG_UNASSIGNED
2435
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2436
#endif
2437
#if defined(TARGET_SPARC)
2438
    do_unassigned_access(addr, 1, 0, 0, 2);
2439
#endif
2440
}
2441

    
2442
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2443
{
2444
#ifdef DEBUG_UNASSIGNED
2445
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2446
#endif
2447
#if defined(TARGET_SPARC)
2448
    do_unassigned_access(addr, 1, 0, 0, 4);
2449
#endif
2450
}
2451

    
2452
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2453
    unassigned_mem_readb,
2454
    unassigned_mem_readw,
2455
    unassigned_mem_readl,
2456
};
2457

    
2458
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2459
    unassigned_mem_writeb,
2460
    unassigned_mem_writew,
2461
    unassigned_mem_writel,
2462
};
2463

    
2464
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2465
                                uint32_t val)
2466
{
2467
    int dirty_flags;
2468
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2469
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2470
#if !defined(CONFIG_USER_ONLY)
2471
        tb_invalidate_phys_page_fast(ram_addr, 1);
2472
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2473
#endif
2474
    }
2475
    stb_p(phys_ram_base + ram_addr, val);
2476
#ifdef USE_KQEMU
2477
    if (cpu_single_env->kqemu_enabled &&
2478
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2479
        kqemu_modify_page(cpu_single_env, ram_addr);
2480
#endif
2481
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2482
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2483
    /* we remove the notdirty callback only if the code has been
2484
       flushed */
2485
    if (dirty_flags == 0xff)
2486
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2487
}
2488

    
2489
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2490
                                uint32_t val)
2491
{
2492
    int dirty_flags;
2493
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2494
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2495
#if !defined(CONFIG_USER_ONLY)
2496
        tb_invalidate_phys_page_fast(ram_addr, 2);
2497
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2498
#endif
2499
    }
2500
    stw_p(phys_ram_base + ram_addr, val);
2501
#ifdef USE_KQEMU
2502
    if (cpu_single_env->kqemu_enabled &&
2503
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2504
        kqemu_modify_page(cpu_single_env, ram_addr);
2505
#endif
2506
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2507
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2508
    /* we remove the notdirty callback only if the code has been
2509
       flushed */
2510
    if (dirty_flags == 0xff)
2511
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2512
}
2513

    
2514
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2515
                                uint32_t val)
2516
{
2517
    int dirty_flags;
2518
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2519
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2520
#if !defined(CONFIG_USER_ONLY)
2521
        tb_invalidate_phys_page_fast(ram_addr, 4);
2522
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2523
#endif
2524
    }
2525
    stl_p(phys_ram_base + ram_addr, val);
2526
#ifdef USE_KQEMU
2527
    if (cpu_single_env->kqemu_enabled &&
2528
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2529
        kqemu_modify_page(cpu_single_env, ram_addr);
2530
#endif
2531
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2532
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2533
    /* we remove the notdirty callback only if the code has been
2534
       flushed */
2535
    if (dirty_flags == 0xff)
2536
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2537
}
2538

    
2539
static CPUReadMemoryFunc *error_mem_read[3] = {
2540
    NULL, /* never used */
2541
    NULL, /* never used */
2542
    NULL, /* never used */
2543
};
2544

    
2545
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2546
    notdirty_mem_writeb,
2547
    notdirty_mem_writew,
2548
    notdirty_mem_writel,
2549
};
2550

    
2551
/* Generate a debug exception if a watchpoint has been hit.  */
2552
static void check_watchpoint(int offset, int len_mask, int flags)
2553
{
2554
    CPUState *env = cpu_single_env;
2555
    target_ulong pc, cs_base;
2556
    TranslationBlock *tb;
2557
    target_ulong vaddr;
2558
    CPUWatchpoint *wp;
2559
    int cpu_flags;
2560

    
2561
    if (env->watchpoint_hit) {
2562
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2566
        return;
2567
    }
2568
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2569
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2570
        if ((vaddr == (wp->vaddr & len_mask) ||
2571
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2572
            wp->flags |= BP_WATCHPOINT_HIT;
2573
            if (!env->watchpoint_hit) {
2574
                env->watchpoint_hit = wp;
2575
                tb = tb_find_pc(env->mem_io_pc);
2576
                if (!tb) {
2577
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2578
                              "pc=%p", (void *)env->mem_io_pc);
2579
                }
2580
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2581
                tb_phys_invalidate(tb, -1);
2582
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2583
                    env->exception_index = EXCP_DEBUG;
2584
                } else {
2585
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2586
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2587
                }
2588
                cpu_resume_from_signal(env, NULL);
2589
            }
2590
        } else {
2591
            wp->flags &= ~BP_WATCHPOINT_HIT;
2592
        }
2593
    }
2594
}
2595

    
2596
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2600
{
2601
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2602
    return ldub_phys(addr);
2603
}
2604

    
2605
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2606
{
2607
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2608
    return lduw_phys(addr);
2609
}
2610

    
2611
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2612
{
2613
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2614
    return ldl_phys(addr);
2615
}
2616

    
2617
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2618
                             uint32_t val)
2619
{
2620
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2621
    stb_phys(addr, val);
2622
}
2623

    
2624
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2625
                             uint32_t val)
2626
{
2627
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2628
    stw_phys(addr, val);
2629
}
2630

    
2631
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2632
                             uint32_t val)
2633
{
2634
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2635
    stl_phys(addr, val);
2636
}
2637

    
2638
static CPUReadMemoryFunc *watch_mem_read[3] = {
2639
    watch_mem_readb,
2640
    watch_mem_readw,
2641
    watch_mem_readl,
2642
};
2643

    
2644
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2645
    watch_mem_writeb,
2646
    watch_mem_writew,
2647
    watch_mem_writel,
2648
};
2649

    
2650
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2651
                                 unsigned int len)
2652
{
2653
    uint32_t ret;
2654
    unsigned int idx;
2655

    
2656
    idx = SUBPAGE_IDX(addr);
2657
#if defined(DEBUG_SUBPAGE)
2658
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2659
           mmio, len, addr, idx);
2660
#endif
2661
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2662
                                       addr + mmio->region_offset[idx][0][len]);
2663

    
2664
    return ret;
2665
}
2666

    
2667
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2668
                              uint32_t value, unsigned int len)
2669
{
2670
    unsigned int idx;
2671

    
2672
    idx = SUBPAGE_IDX(addr);
2673
#if defined(DEBUG_SUBPAGE)
2674
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2675
           mmio, len, addr, idx, value);
2676
#endif
2677
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2678
                                  addr + mmio->region_offset[idx][1][len],
2679
                                  value);
2680
}
2681

    
2682
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2683
{
2684
#if defined(DEBUG_SUBPAGE)
2685
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2686
#endif
2687

    
2688
    return subpage_readlen(opaque, addr, 0);
2689
}
2690

    
2691
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2692
                            uint32_t value)
2693
{
2694
#if defined(DEBUG_SUBPAGE)
2695
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2696
#endif
2697
    subpage_writelen(opaque, addr, value, 0);
2698
}
2699

    
2700
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2701
{
2702
#if defined(DEBUG_SUBPAGE)
2703
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2704
#endif
2705

    
2706
    return subpage_readlen(opaque, addr, 1);
2707
}
2708

    
2709
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2710
                            uint32_t value)
2711
{
2712
#if defined(DEBUG_SUBPAGE)
2713
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2714
#endif
2715
    subpage_writelen(opaque, addr, value, 1);
2716
}
2717

    
2718
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2719
{
2720
#if defined(DEBUG_SUBPAGE)
2721
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2722
#endif
2723

    
2724
    return subpage_readlen(opaque, addr, 2);
2725
}
2726

    
2727
static void subpage_writel (void *opaque,
2728
                         target_phys_addr_t addr, uint32_t value)
2729
{
2730
#if defined(DEBUG_SUBPAGE)
2731
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2732
#endif
2733
    subpage_writelen(opaque, addr, value, 2);
2734
}
2735

    
2736
static CPUReadMemoryFunc *subpage_read[] = {
2737
    &subpage_readb,
2738
    &subpage_readw,
2739
    &subpage_readl,
2740
};
2741

    
2742
static CPUWriteMemoryFunc *subpage_write[] = {
2743
    &subpage_writeb,
2744
    &subpage_writew,
2745
    &subpage_writel,
2746
};
2747

    
2748
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2749
                             ram_addr_t memory, ram_addr_t region_offset)
2750
{
2751
    int idx, eidx;
2752
    unsigned int i;
2753

    
2754
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2755
        return -1;
2756
    idx = SUBPAGE_IDX(start);
2757
    eidx = SUBPAGE_IDX(end);
2758
#if defined(DEBUG_SUBPAGE)
2759
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2760
           mmio, start, end, idx, eidx, memory);
2761
#endif
2762
    memory >>= IO_MEM_SHIFT;
2763
    for (; idx <= eidx; idx++) {
2764
        for (i = 0; i < 4; i++) {
2765
            if (io_mem_read[memory][i]) {
2766
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2767
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2768
                mmio->region_offset[idx][0][i] = region_offset;
2769
            }
2770
            if (io_mem_write[memory][i]) {
2771
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2772
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2773
                mmio->region_offset[idx][1][i] = region_offset;
2774
            }
2775
        }
2776
    }
2777

    
2778
    return 0;
2779
}
2780

    
2781
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2782
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2783
{
2784
    subpage_t *mmio;
2785
    int subpage_memory;
2786

    
2787
    mmio = qemu_mallocz(sizeof(subpage_t));
2788

    
2789
    mmio->base = base;
2790
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2791
#if defined(DEBUG_SUBPAGE)
2792
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2793
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2794
#endif
2795
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2796
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2797
                         region_offset);
2798

    
2799
    return mmio;
2800
}
2801

    
2802
static void io_mem_init(void)
2803
{
2804
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2805
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2806
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2807
    io_mem_nb = 5;
2808

    
2809
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2810
                                          watch_mem_write, NULL);
2811
    /* alloc dirty bits array */
2812
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2813
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2814
}
2815

    
2816
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
2825
                           CPUReadMemoryFunc **mem_read,
2826
                           CPUWriteMemoryFunc **mem_write,
2827
                           void *opaque)
2828
{
2829
    int i, subwidth = 0;
2830

    
2831
    if (io_index <= 0) {
2832
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2833
            return -1;
2834
        io_index = io_mem_nb++;
2835
    } else {
2836
        if (io_index >= IO_MEM_NB_ENTRIES)
2837
            return -1;
2838
    }
2839

    
2840
    for(i = 0;i < 3; i++) {
2841
        if (!mem_read[i] || !mem_write[i])
2842
            subwidth = IO_MEM_SUBWIDTH;
2843
        io_mem_read[io_index][i] = mem_read[i];
2844
        io_mem_write[io_index][i] = mem_write[i];
2845
    }
2846
    io_mem_opaque[io_index] = opaque;
2847
    return (io_index << IO_MEM_SHIFT) | subwidth;
2848
}
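/* Illustrative use of cpu_register_io_memory() above (the my_readX and
   my_writeX callbacks are hypothetical, not part of this file):

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };

       io = cpu_register_io_memory(0, my_read, my_write, opaque);
       if (io != -1)
           cpu_register_physical_memory(base, size, io);

   io_index == 0 allocates a new slot; the return value already includes the
   IO_MEM_SHIFT encoding expected by cpu_register_physical_memory(). */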
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2851
{
2852
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2853
}
2854

    
2855
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2856
{
2857
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2858
}
2859

    
2860
#endif /* !defined(CONFIG_USER_ONLY) */
2861

    
2862
/* physical memory access (slow version, mainly for debug) */
2863
#if defined(CONFIG_USER_ONLY)
2864
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2865
                            int len, int is_write)
2866
{
2867
    int l, flags;
2868
    target_ulong page;
2869
    void * p;
2870

    
2871
    while (len > 0) {
2872
        page = addr & TARGET_PAGE_MASK;
2873
        l = (page + TARGET_PAGE_SIZE) - addr;
2874
        if (l > len)
2875
            l = len;
2876
        flags = page_get_flags(page);
2877
        if (!(flags & PAGE_VALID))
2878
            return;
2879
        if (is_write) {
2880
            if (!(flags & PAGE_WRITE))
2881
                return;
2882
            /* XXX: this code should not depend on lock_user */
2883
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2884
                /* FIXME - should this return an error rather than just fail? */
2885
                return;
2886
            memcpy(p, buf, l);
2887
            unlock_user(p, addr, l);
2888
        } else {
2889
            if (!(flags & PAGE_READ))
2890
                return;
2891
            /* XXX: this code should not depend on lock_user */
2892
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2893
                /* FIXME - should this return an error rather than just fail? */
2894
                return;
2895
            memcpy(buf, p, l);
2896
            unlock_user(p, addr, 0);
2897
        }
2898
        len -= l;
2899
        buf += l;
2900
        addr += l;
2901
    }
2902
}
2903

    
2904
#else
2905
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2906
                            int len, int is_write)
2907
{
2908
    int l, io_index;
2909
    uint8_t *ptr;
2910
    uint32_t val;
2911
    target_phys_addr_t page;
2912
    unsigned long pd;
2913
    PhysPageDesc *p;
2914

    
2915
    while (len > 0) {
2916
        page = addr & TARGET_PAGE_MASK;
2917
        l = (page + TARGET_PAGE_SIZE) - addr;
2918
        if (l > len)
2919
            l = len;
2920
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2921
        if (!p) {
2922
            pd = IO_MEM_UNASSIGNED;
2923
        } else {
2924
            pd = p->phys_offset;
2925
        }
2926

    
2927
        if (is_write) {
2928
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2929
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2930
                if (p)
2931
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2932
                /* XXX: could force cpu_single_env to NULL to avoid
2933
                   potential bugs */
2934
                if (l >= 4 && ((addr & 3) == 0)) {
2935
                    /* 32 bit write access */
2936
                    val = ldl_p(buf);
2937
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2938
                    l = 4;
2939
                } else if (l >= 2 && ((addr & 1) == 0)) {
2940
                    /* 16 bit write access */
2941
                    val = lduw_p(buf);
2942
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2943
                    l = 2;
2944
                } else {
2945
                    /* 8 bit write access */
2946
                    val = ldub_p(buf);
2947
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2948
                    l = 1;
2949
                }
2950
            } else {
2951
                unsigned long addr1;
2952
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2953
                /* RAM case */
2954
                ptr = phys_ram_base + addr1;
2955
                memcpy(ptr, buf, l);
2956
                if (!cpu_physical_memory_is_dirty(addr1)) {
2957
                    /* invalidate code */
2958
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2959
                    /* set dirty bit */
2960
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2961
                        (0xff & ~CODE_DIRTY_FLAG);
2962
                }
2963
            }
2964
        } else {
2965
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2966
                !(pd & IO_MEM_ROMD)) {
2967
                /* I/O case */
2968
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2969
                if (p)
2970
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2971
                if (l >= 4 && ((addr & 3) == 0)) {
2972
                    /* 32 bit read access */
2973
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2974
                    stl_p(buf, val);
2975
                    l = 4;
2976
                } else if (l >= 2 && ((addr & 1) == 0)) {
2977
                    /* 16 bit read access */
2978
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2979
                    stw_p(buf, val);
2980
                    l = 2;
2981
                } else {
2982
                    /* 8 bit read access */
2983
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2984
                    stb_p(buf, val);
2985
                    l = 1;
2986
                }
2987
            } else {
2988
                /* RAM case */
2989
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2990
                    (addr & ~TARGET_PAGE_MASK);
2991
                memcpy(buf, ptr, l);
2992
            }
2993
        }
2994
        len -= l;
2995
        buf += l;
2996
        addr += l;
2997
    }
2998
}
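/* The cpu_physical_memory_read()/cpu_physical_memory_write() helpers used
   elsewhere in QEMU are thin wrappers around this function with is_write
   set to 0 and 1 respectively, e.g. the ldub_phys()/lduw_phys() routines
   below read small scalars through cpu_physical_memory_read(). */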
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3002
                                   const uint8_t *buf, int len)
3003
{
3004
    int l;
3005
    uint8_t *ptr;
3006
    target_phys_addr_t page;
3007
    unsigned long pd;
3008
    PhysPageDesc *p;
3009

    
3010
    while (len > 0) {
3011
        page = addr & TARGET_PAGE_MASK;
3012
        l = (page + TARGET_PAGE_SIZE) - addr;
3013
        if (l > len)
3014
            l = len;
3015
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3016
        if (!p) {
3017
            pd = IO_MEM_UNASSIGNED;
3018
        } else {
3019
            pd = p->phys_offset;
3020
        }
3021

    
3022
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3023
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3024
            !(pd & IO_MEM_ROMD)) {
3025
            /* do nothing */
3026
        } else {
3027
            unsigned long addr1;
3028
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3029
            /* ROM/RAM case */
3030
            ptr = phys_ram_base + addr1;
3031
            memcpy(ptr, buf, l);
3032
        }
3033
        len -= l;
3034
        buf += l;
3035
        addr += l;
3036
    }
3037
}
3038

    
3039
typedef struct {
3040
    void *buffer;
3041
    target_phys_addr_t addr;
3042
    target_phys_addr_t len;
3043
} BounceBuffer;
3044

    
3045
static BounceBuffer bounce;
3046

    
3047
typedef struct MapClient {
3048
    void *opaque;
3049
    void (*callback)(void *opaque);
3050
    LIST_ENTRY(MapClient) link;
3051
} MapClient;
3052

    
3053
static LIST_HEAD(map_client_list, MapClient) map_client_list
3054
    = LIST_HEAD_INITIALIZER(map_client_list);
3055

    
3056
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3057
{
3058
    MapClient *client = qemu_malloc(sizeof(*client));
3059

    
3060
    client->opaque = opaque;
3061
    client->callback = callback;
3062
    LIST_INSERT_HEAD(&map_client_list, client, link);
3063
    return client;
3064
}
3065

    
3066
void cpu_unregister_map_client(void *_client)
3067
{
3068
    MapClient *client = (MapClient *)_client;
3069

    
3070
    LIST_REMOVE(client, link);
3071
}
3072

    
3073
static void cpu_notify_map_clients(void)
3074
{
3075
    MapClient *client;
3076

    
3077
    while (!LIST_EMPTY(&map_client_list)) {
3078
        client = LIST_FIRST(&map_client_list);
3079
        client->callback(client->opaque);
3080
        LIST_REMOVE(client, link);
3081
    }
3082
}
3083

    
3084
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
3092
                              target_phys_addr_t *plen,
3093
                              int is_write)
3094
{
3095
    target_phys_addr_t len = *plen;
3096
    target_phys_addr_t done = 0;
3097
    int l;
3098
    uint8_t *ret = NULL;
3099
    uint8_t *ptr;
3100
    target_phys_addr_t page;
3101
    unsigned long pd;
3102
    PhysPageDesc *p;
3103
    unsigned long addr1;
3104

    
3105
    while (len > 0) {
3106
        page = addr & TARGET_PAGE_MASK;
3107
        l = (page + TARGET_PAGE_SIZE) - addr;
3108
        if (l > len)
3109
            l = len;
3110
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3111
        if (!p) {
3112
            pd = IO_MEM_UNASSIGNED;
3113
        } else {
3114
            pd = p->phys_offset;
3115
        }
3116

    
3117
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3118
            if (done || bounce.buffer) {
3119
                break;
3120
            }
3121
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3122
            bounce.addr = addr;
3123
            bounce.len = l;
3124
            if (!is_write) {
3125
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3126
            }
3127
            ptr = bounce.buffer;
3128
        } else {
3129
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3130
            ptr = phys_ram_base + addr1;
3131
        }
3132
        if (!done) {
3133
            ret = ptr;
3134
        } else if (ret + done != ptr) {
3135
            break;
3136
        }
3137

    
3138
        len -= l;
3139
        addr += l;
3140
        done += l;
3141
    }
3142
    *plen = done;
3143
    return ret;
3144
}
3145

    
3146
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3151
                               int is_write, target_phys_addr_t access_len)
3152
{
3153
    if (buffer != bounce.buffer) {
3154
        if (is_write) {
3155
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3156
            while (access_len) {
3157
                unsigned l;
3158
                l = TARGET_PAGE_SIZE;
3159
                if (l > access_len)
3160
                    l = access_len;
3161
                if (!cpu_physical_memory_is_dirty(addr1)) {
3162
                    /* invalidate code */
3163
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3164
                    /* set dirty bit */
3165
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3166
                        (0xff & ~CODE_DIRTY_FLAG);
3167
                }
3168
                addr1 += l;
3169
                access_len -= l;
3170
            }
3171
        }
3172
        return;
3173
    }
3174
    if (is_write) {
3175
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3176
    }
3177
    qemu_free(bounce.buffer);
3178
    bounce.buffer = NULL;
3179
    cpu_notify_map_clients();
3180
}
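
/* Illustrative sketch (hypothetical helper, compiled out): the write-side
 * round trip.  Passing is_write = 1 and the number of bytes actually
 * written lets cpu_physical_memory_unmap() above mark the RAM dirty and
 * invalidate any translated code in the touched pages. */
#if 0
static void example_map_for_write(target_phys_addr_t addr,
                                  const uint8_t *data,
                                  target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *buf = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);
    if (!buf)
        return;                      /* retry later via a map client */
    memcpy(buf, data, plen);         /* plen may be smaller than len */
    cpu_physical_memory_unmap(buf, plen, 1, plen /* access_len */);
}
#endif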

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
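
/* Illustrative sketch (hypothetical helper, compiled out): updating the
 * accessed/dirty flags of a 32-bit guest page table entry.  Using
 * stl_phys_notdirty() means rewriting the PTE neither dirties the RAM page
 * nor invalidates translated code, which is the use case described above. */
#if 0
static void example_update_pte_flags(target_phys_addr_t pte_addr,
                                     uint32_t set_bits)
{
    uint32_t pte = ldl_phys(pte_addr);
    if ((pte & set_bits) != set_bits) {
        stl_phys_notdirty(pte_addr, pte | set_bits);
    }
}
#endif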

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
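
/* Illustrative sketch (hypothetical helper, compiled out): reading a
 * 32-bit value at a guest virtual address, e.g. on behalf of a debugger
 * stub.  cpu_memory_rw_debug() translates the address page by page through
 * the CPU's MMU and fails if any page is unmapped. */
#if 0
static int example_debug_read_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;                  /* page not mapped */
    *out = ldl_p(buf);              /* interpret in target byte order */
    return 0;
}
#endif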

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
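
/* Illustrative sketch (hypothetical helper, compiled out): dumping the
 * statistics above to stderr.  fprintf() already matches the cpu_fprintf
 * callback signature, so it can be passed directly. */
#if 0
static void example_dump_exec_info_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif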

#if !defined(CONFIG_USER_ONLY)
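
/* Descriptive note: softmmu_template.h is expanded once per access size
   below (SHIFT n selects a (1 << n)-byte access).  Together with
   SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu this generates the code-fetch
   ("_cmmu") flavour of the softmmu load helpers used when the translators
   fetch guest instructions; GETPC() is NULL because there is no
   translated-code return address to restore state from, and env is
   aliased to cpu_single_env for these helpers. */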

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif