1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#include "hw/hw.h"
41
#include "osdep.h"
42
#include "kvm.h"
43
#if defined(CONFIG_USER_ONLY)
44
#include <qemu.h>
45
#endif
46

    
47
//#define DEBUG_TB_INVALIDATE
48
//#define DEBUG_FLUSH
49
//#define DEBUG_TLB
50
//#define DEBUG_UNASSIGNED
51

    
52
/* make various TB consistency checks */
53
//#define DEBUG_TB_CHECK
54
//#define DEBUG_TLB_CHECK
55

    
56
//#define DEBUG_IOPORT
57
//#define DEBUG_SUBPAGE
58

    
59
#if !defined(CONFIG_USER_ONLY)
60
/* TB consistency checks only implemented for usermode emulation.  */
61
#undef DEBUG_TB_CHECK
62
#endif
63

    
64
#define SMC_BITMAP_USE_THRESHOLD 10
65

    
66
#define MMAP_AREA_START        0x00000000
67
#define MMAP_AREA_END          0xa8000000
68

    
69
#if defined(TARGET_SPARC64)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 41
71
#elif defined(TARGET_SPARC)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 36
73
#elif defined(TARGET_ALPHA)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#define TARGET_VIRT_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_PPC64)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 42
80
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81
#define TARGET_PHYS_ADDR_SPACE_BITS 36
82
#else
83
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84
#define TARGET_PHYS_ADDR_SPACE_BITS 32
85
#endif
86

    
87
static TranslationBlock *tbs;
88
int code_gen_max_blocks;
89
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90
static int nb_tbs;
91
/* any access to the tbs or the page table must use this lock */
92
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93

    
94
#if defined(__arm__) || defined(__sparc_v9__)
95
/* The prologue must be reachable with a direct jump. ARM and Sparc64
96
 have limited branch ranges (possibly also PPC) so place it in a
97
 section close to code segment. */
98
#define code_gen_section                                \
99
    __attribute__((__section__(".gen_code")))           \
100
    __attribute__((aligned (32)))
101
#else
102
#define code_gen_section                                \
103
    __attribute__((aligned (32)))
104
#endif
105

    
106
uint8_t code_gen_prologue[1024] code_gen_section;
107
static uint8_t *code_gen_buffer;
108
static unsigned long code_gen_buffer_size;
109
/* threshold to flush the translated code buffer */
110
static unsigned long code_gen_buffer_max_size;
111
uint8_t *code_gen_ptr;
112

    
113
#if !defined(CONFIG_USER_ONLY)
114
ram_addr_t phys_ram_size;
115
int phys_ram_fd;
116
uint8_t *phys_ram_base;
117
uint8_t *phys_ram_dirty;
118
static int in_migration;
119
static ram_addr_t phys_ram_alloc_offset = 0;
120
#endif
121

    
122
CPUState *first_cpu;
123
/* current CPU in the current thread. It is only valid inside
124
   cpu_exec() */
125
CPUState *cpu_single_env;
126
/* 0 = Do not count executed instructions.
127
   1 = Precise instruction counting.
128
   2 = Adaptive rate instruction counting.  */
129
int use_icount = 0;
130
/* Current instruction counter.  While executing translated code this may
131
   include some instructions that have not yet been executed.  */
132
int64_t qemu_icount;
133

    
134
typedef struct PageDesc {
135
    /* list of TBs intersecting this ram page */
136
    TranslationBlock *first_tb;
137
    /* in order to optimize self modifying code, we count the number
138
       of lookups we do to a given page to use a bitmap */
139
    unsigned int code_write_count;
140
    uint8_t *code_bitmap;
141
#if defined(CONFIG_USER_ONLY)
142
    unsigned long flags;
143
#endif
144
} PageDesc;
145

    
146
typedef struct PhysPageDesc {
147
    /* offset in host memory of the page + io_index in the low bits */
148
    ram_addr_t phys_offset;
149
    ram_addr_t region_offset;
150
} PhysPageDesc;
151

    
152
#define L2_BITS 10
153
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154
/* XXX: this is a temporary hack for alpha target.
155
 *      In the future, this is to be replaced by a multi-level table
156
 *      to actually be able to handle the complete 64 bits address space.
157
 */
158
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159
#else
160
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161
#endif
162

    
163
#define L1_SIZE (1 << L1_BITS)
164
#define L2_SIZE (1 << L2_BITS)
165

    
166
unsigned long qemu_real_host_page_size;
167
unsigned long qemu_host_page_bits;
168
unsigned long qemu_host_page_size;
169
unsigned long qemu_host_page_mask;
170

    
171
/* XXX: for system emulation, it could just be an array */
172
static PageDesc *l1_map[L1_SIZE];
173
static PhysPageDesc **l1_phys_map;
174

    
175
#if !defined(CONFIG_USER_ONLY)
176
static void io_mem_init(void);
177

    
178
/* io memory support */
179
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182
static char io_mem_used[IO_MEM_NB_ENTRIES];
183
static int io_mem_watch;
184
#endif
185

    
186
/* log support */
187
static const char *logfilename = "/tmp/qemu.log";
188
FILE *logfile;
189
int loglevel;
190
static int log_append = 0;
191

    
192
/* statistics */
193
static int tlb_flush_count;
194
static int tb_flush_count;
195
static int tb_phys_invalidate_count;
196

    
197
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198
typedef struct subpage_t {
199
    target_phys_addr_t base;
200
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202
    void *opaque[TARGET_PAGE_SIZE][2][4];
203
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204
} subpage_t;
205

    
206
#ifdef _WIN32
207
static void map_exec(void *addr, long size)
208
{
209
    DWORD old_protect;
210
    VirtualProtect(addr, size,
211
                   PAGE_EXECUTE_READWRITE, &old_protect);
212
    
213
}
214
#else
215
static void map_exec(void *addr, long size)
216
{
217
    unsigned long start, end, page_size;
218
    
219
    page_size = getpagesize();
220
    start = (unsigned long)addr;
221
    start &= ~(page_size - 1);
222
    
223
    end = (unsigned long)addr + size;
224
    end += page_size - 1;
225
    end &= ~(page_size - 1);
226
    
227
    mprotect((void *)start, end - start,
228
             PROT_READ | PROT_WRITE | PROT_EXEC);
229
}
230
#endif
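
/*
 * Worked example (illustrative, assuming a 4096-byte host page): mprotect()
 * only operates on whole pages, so the POSIX map_exec() above rounds the
 * requested range outward.  For addr = 0x1234567 and size = 0x100:
 *   start = 0x1234567 & ~0xfff           = 0x1234000
 *   end   = (0x1234667 + 0xfff) & ~0xfff = 0x1235000
 * so exactly the one page containing [addr, addr + size) is made executable.
 */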
231

    
232
static void page_init(void)
233
{
234
    /* NOTE: we can always assume that qemu_host_page_size >=
235
       TARGET_PAGE_SIZE */
236
#ifdef _WIN32
237
    {
238
        SYSTEM_INFO system_info;
239

    
240
        GetSystemInfo(&system_info);
241
        qemu_real_host_page_size = system_info.dwPageSize;
242
    }
243
#else
244
    qemu_real_host_page_size = getpagesize();
245
#endif
246
    if (qemu_host_page_size == 0)
247
        qemu_host_page_size = qemu_real_host_page_size;
248
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
249
        qemu_host_page_size = TARGET_PAGE_SIZE;
250
    qemu_host_page_bits = 0;
251
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252
        qemu_host_page_bits++;
253
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
254
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
256

    
257
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
258
    {
259
        long long startaddr, endaddr;
260
        FILE *f;
261
        int n;
262

    
263
        mmap_lock();
264
        last_brk = (unsigned long)sbrk(0);
265
        f = fopen("/proc/self/maps", "r");
266
        if (f) {
267
            do {
268
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269
                if (n == 2) {
270
                    startaddr = MIN(startaddr,
271
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272
                    endaddr = MIN(endaddr,
273
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
275
                                   TARGET_PAGE_ALIGN(endaddr),
276
                                   PAGE_RESERVED); 
277
                }
278
            } while (!feof(f));
279
            fclose(f);
280
        }
281
        mmap_unlock();
282
    }
283
#endif
284
}
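
/*
 * Worked example (a sketch, assuming an 8192-byte host page and a 4096-byte
 * TARGET_PAGE_SIZE): page_init() keeps the larger of the two sizes, so
 * qemu_host_page_size = 8192, qemu_host_page_bits = 13 and
 * qemu_host_page_mask = ~0x1fff.  One host page then always covers a whole
 * number of target pages, which the mprotect()-based code protection in
 * tb_alloc_page() below relies on.
 */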
285

    
286
static inline PageDesc **page_l1_map(target_ulong index)
287
{
288
#if TARGET_LONG_BITS > 32
289
    /* Host memory outside guest VM.  For 32-bit targets we have already
290
       excluded high addresses.  */
291
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292
        return NULL;
293
#endif
294
    return &l1_map[index >> L2_BITS];
295
}
296

    
297
static inline PageDesc *page_find_alloc(target_ulong index)
298
{
299
    PageDesc **lp, *p;
300
    lp = page_l1_map(index);
301
    if (!lp)
302
        return NULL;
303

    
304
    p = *lp;
305
    if (!p) {
306
        /* allocate if not found */
307
#if defined(CONFIG_USER_ONLY)
308
        size_t len = sizeof(PageDesc) * L2_SIZE;
309
        /* Don't use qemu_malloc because it may recurse.  */
310
        p = mmap(0, len, PROT_READ | PROT_WRITE,
311
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312
        *lp = p;
313
        if (h2g_valid(p)) {
314
            unsigned long addr = h2g(p);
315
            page_set_flags(addr & TARGET_PAGE_MASK,
316
                           TARGET_PAGE_ALIGN(addr + len),
317
                           PAGE_RESERVED); 
318
        }
319
#else
320
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321
        *lp = p;
322
#endif
323
    }
324
    return p + (index & (L2_SIZE - 1));
325
}
326

    
327
static inline PageDesc *page_find(target_ulong index)
328
{
329
    PageDesc **lp, *p;
330
    lp = page_l1_map(index);
331
    if (!lp)
332
        return NULL;
333

    
334
    p = *lp;
335
    if (!p)
336
        return 0;
337
    return p + (index & (L2_SIZE - 1));
338
}
339

    
340
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
341
{
342
    void **lp, **p;
343
    PhysPageDesc *pd;
344

    
345
    p = (void **)l1_phys_map;
346
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347

    
348
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350
#endif
351
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352
    p = *lp;
353
    if (!p) {
354
        /* allocate if not found */
355
        if (!alloc)
356
            return NULL;
357
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358
        memset(p, 0, sizeof(void *) * L1_SIZE);
359
        *lp = p;
360
    }
361
#endif
362
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363
    pd = *lp;
364
    if (!pd) {
365
        int i;
366
        /* allocate if not found */
367
        if (!alloc)
368
            return NULL;
369
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370
        *lp = pd;
371
        for (i = 0; i < L2_SIZE; i++) {
372
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
373
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
374
        }
375
    }
376
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
377
}
378

    
379
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
380
{
381
    return phys_page_find_alloc(index, 0);
382
}
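
/*
 * Illustrative sketch (assumptions: TARGET_PAGE_BITS = 12, L2_BITS = 10 and
 * TARGET_PHYS_ADDR_SPACE_BITS <= 32, so only the last lookup level of
 * phys_page_find_alloc() is exercised): a physical address decomposes into
 * an l1_phys_map slot, a slot in the PhysPageDesc array it points to, and
 * the offset inside the page.
 */
#if 0
static void phys_index_split_example(void)
{
    target_phys_addr_t addr = 0x12345678;
    target_phys_addr_t index = addr >> TARGET_PAGE_BITS;  /* 0x12345 */
    unsigned l1 = (index >> L2_BITS) & (L1_SIZE - 1);      /* 0x48    */
    unsigned l2 = index & (L2_SIZE - 1);                   /* 0x345   */
    /* phys_page_find(index) then returns &l1_phys_map[l1][l2], or NULL
       if that level has not been allocated yet. */
    (void)l1; (void)l2;
}
#endif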
383

    
384
#if !defined(CONFIG_USER_ONLY)
385
static void tlb_protect_code(ram_addr_t ram_addr);
386
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
387
                                    target_ulong vaddr);
388
#define mmap_lock() do { } while(0)
389
#define mmap_unlock() do { } while(0)
390
#endif
391

    
392
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
393

    
394
#if defined(CONFIG_USER_ONLY)
395
/* Currently it is not recommended to allocate big chunks of data in
396
   user mode. This will change when a dedicated libc is used. */
397
#define USE_STATIC_CODE_GEN_BUFFER
398
#endif
399

    
400
#ifdef USE_STATIC_CODE_GEN_BUFFER
401
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
402
#endif
403

    
404
static void code_gen_alloc(unsigned long tb_size)
405
{
406
#ifdef USE_STATIC_CODE_GEN_BUFFER
407
    code_gen_buffer = static_code_gen_buffer;
408
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
409
    map_exec(code_gen_buffer, code_gen_buffer_size);
410
#else
411
    code_gen_buffer_size = tb_size;
412
    if (code_gen_buffer_size == 0) {
413
#if defined(CONFIG_USER_ONLY)
414
        /* in user mode, phys_ram_size is not meaningful */
415
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
416
#else
417
        /* XXX: needs adjustments */
418
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
419
#endif
420
    }
421
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
422
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
423
    /* The code gen buffer location may have constraints depending on
424
       the host cpu and OS */
425
#if defined(__linux__) 
426
    {
427
        int flags;
428
        void *start = NULL;
429

    
430
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
431
#if defined(__x86_64__)
432
        flags |= MAP_32BIT;
433
        /* Cannot map more than that */
434
        if (code_gen_buffer_size > (800 * 1024 * 1024))
435
            code_gen_buffer_size = (800 * 1024 * 1024);
436
#elif defined(__sparc_v9__)
437
        // Map the buffer below 2G, so we can use direct calls and branches
438
        flags |= MAP_FIXED;
439
        start = (void *) 0x60000000UL;
440
        if (code_gen_buffer_size > (512 * 1024 * 1024))
441
            code_gen_buffer_size = (512 * 1024 * 1024);
442
#elif defined(__arm__)
443
        /* Map the buffer below 32M, so we can use direct calls and branches */
444
        flags |= MAP_FIXED;
445
        start = (void *) 0x01000000UL;
446
        if (code_gen_buffer_size > 16 * 1024 * 1024)
447
            code_gen_buffer_size = 16 * 1024 * 1024;
448
#endif
449
        code_gen_buffer = mmap(start, code_gen_buffer_size,
450
                               PROT_WRITE | PROT_READ | PROT_EXEC,
451
                               flags, -1, 0);
452
        if (code_gen_buffer == MAP_FAILED) {
453
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
454
            exit(1);
455
        }
456
    }
457
#elif defined(__FreeBSD__)
458
    {
459
        int flags;
460
        void *addr = NULL;
461
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
462
#if defined(__x86_64__)
463
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
464
         * 0x40000000 is free */
465
        flags |= MAP_FIXED;
466
        addr = (void *)0x40000000;
467
        /* Cannot map more than that */
468
        if (code_gen_buffer_size > (800 * 1024 * 1024))
469
            code_gen_buffer_size = (800 * 1024 * 1024);
470
#endif
471
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
472
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
473
                               flags, -1, 0);
474
        if (code_gen_buffer == MAP_FAILED) {
475
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
476
            exit(1);
477
        }
478
    }
479
#else
480
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
481
    map_exec(code_gen_buffer, code_gen_buffer_size);
482
#endif
483
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
484
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
485
    code_gen_buffer_max_size = code_gen_buffer_size - 
486
        code_gen_max_block_size();
487
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
488
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
489
}
490

    
491
/* Must be called before using the QEMU cpus. 'tb_size' is the size
492
   (in bytes) allocated to the translation buffer. Zero means default
493
   size. */
494
void cpu_exec_init_all(unsigned long tb_size)
495
{
496
    cpu_gen_init();
497
    code_gen_alloc(tb_size);
498
    code_gen_ptr = code_gen_buffer;
499
    page_init();
500
#if !defined(CONFIG_USER_ONLY)
501
    io_mem_init();
502
#endif
503
}
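
/*
 * Usage sketch (illustrative; front_end_startup_example and cpu_model are
 * hypothetical names, not part of this file): a front end is expected to
 * initialize the translator once, before any CPU is created.
 */
#if 0
static void front_end_startup_example(const char *cpu_model)
{
    CPUState *env;

    cpu_exec_init_all(0);          /* 0 = use the default buffer size */
    env = cpu_init(cpu_model);     /* per-target init; calls cpu_exec_init() */
    (void)env;
}
#endif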
504

    
505
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
506

    
507
#define CPU_COMMON_SAVE_VERSION 1
508

    
509
static void cpu_common_save(QEMUFile *f, void *opaque)
510
{
511
    CPUState *env = opaque;
512

    
513
    qemu_put_be32s(f, &env->halted);
514
    qemu_put_be32s(f, &env->interrupt_request);
515
}
516

    
517
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
518
{
519
    CPUState *env = opaque;
520

    
521
    if (version_id != CPU_COMMON_SAVE_VERSION)
522
        return -EINVAL;
523

    
524
    qemu_get_be32s(f, &env->halted);
525
    qemu_get_be32s(f, &env->interrupt_request);
526
    tlb_flush(env, 1);
527

    
528
    return 0;
529
}
530
#endif
531

    
532
void cpu_exec_init(CPUState *env)
533
{
534
    CPUState **penv;
535
    int cpu_index;
536

    
537
#if defined(CONFIG_USER_ONLY)
538
    cpu_list_lock();
539
#endif
540
    env->next_cpu = NULL;
541
    penv = &first_cpu;
542
    cpu_index = 0;
543
    while (*penv != NULL) {
544
        penv = (CPUState **)&(*penv)->next_cpu;
545
        cpu_index++;
546
    }
547
    env->cpu_index = cpu_index;
548
    TAILQ_INIT(&env->breakpoints);
549
    TAILQ_INIT(&env->watchpoints);
550
    *penv = env;
551
#if defined(CONFIG_USER_ONLY)
552
    cpu_list_unlock();
553
#endif
554
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
555
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
556
                    cpu_common_save, cpu_common_load, env);
557
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
558
                    cpu_save, cpu_load, env);
559
#endif
560
}
561

    
562
static inline void invalidate_page_bitmap(PageDesc *p)
563
{
564
    if (p->code_bitmap) {
565
        qemu_free(p->code_bitmap);
566
        p->code_bitmap = NULL;
567
    }
568
    p->code_write_count = 0;
569
}
570

    
571
/* set to NULL all the 'first_tb' fields in all PageDescs */
572
static void page_flush_tb(void)
573
{
574
    int i, j;
575
    PageDesc *p;
576

    
577
    for(i = 0; i < L1_SIZE; i++) {
578
        p = l1_map[i];
579
        if (p) {
580
            for(j = 0; j < L2_SIZE; j++) {
581
                p->first_tb = NULL;
582
                invalidate_page_bitmap(p);
583
                p++;
584
            }
585
        }
586
    }
587
}
588

    
589
/* flush all the translation blocks */
590
/* XXX: tb_flush is currently not thread safe */
591
void tb_flush(CPUState *env1)
592
{
593
    CPUState *env;
594
#if defined(DEBUG_FLUSH)
595
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
596
           (unsigned long)(code_gen_ptr - code_gen_buffer),
597
           nb_tbs, nb_tbs > 0 ?
598
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
599
#endif
600
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
601
        cpu_abort(env1, "Internal error: code buffer overflow\n");
602

    
603
    nb_tbs = 0;
604

    
605
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
606
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
607
    }
608

    
609
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
610
    page_flush_tb();
611

    
612
    code_gen_ptr = code_gen_buffer;
613
    /* XXX: flush processor icache at this point if cache flush is
614
       expensive */
615
    tb_flush_count++;
616
}
617

    
618
#ifdef DEBUG_TB_CHECK
619

    
620
static void tb_invalidate_check(target_ulong address)
621
{
622
    TranslationBlock *tb;
623
    int i;
624
    address &= TARGET_PAGE_MASK;
625
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
626
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
627
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
628
                  address >= tb->pc + tb->size)) {
629
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
630
                       address, (long)tb->pc, tb->size);
631
            }
632
        }
633
    }
634
}
635

    
636
/* verify that all the pages have correct rights for code */
637
static void tb_page_check(void)
638
{
639
    TranslationBlock *tb;
640
    int i, flags1, flags2;
641

    
642
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
643
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
644
            flags1 = page_get_flags(tb->pc);
645
            flags2 = page_get_flags(tb->pc + tb->size - 1);
646
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
647
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
648
                       (long)tb->pc, tb->size, flags1, flags2);
649
            }
650
        }
651
    }
652
}
653

    
654
static void tb_jmp_check(TranslationBlock *tb)
655
{
656
    TranslationBlock *tb1;
657
    unsigned int n1;
658

    
659
    /* suppress any remaining jumps to this TB */
660
    tb1 = tb->jmp_first;
661
    for(;;) {
662
        n1 = (long)tb1 & 3;
663
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
664
        if (n1 == 2)
665
            break;
666
        tb1 = tb1->jmp_next[n1];
667
    }
668
    /* check end of list */
669
    if (tb1 != tb) {
670
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
671
    }
672
}
673

    
674
#endif
675

    
676
/* invalidate one TB */
677
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
678
                             int next_offset)
679
{
680
    TranslationBlock *tb1;
681
    for(;;) {
682
        tb1 = *ptb;
683
        if (tb1 == tb) {
684
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
685
            break;
686
        }
687
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
688
    }
689
}
690

    
691
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
692
{
693
    TranslationBlock *tb1;
694
    unsigned int n1;
695

    
696
    for(;;) {
697
        tb1 = *ptb;
698
        n1 = (long)tb1 & 3;
699
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
700
        if (tb1 == tb) {
701
            *ptb = tb1->page_next[n1];
702
            break;
703
        }
704
        ptb = &tb1->page_next[n1];
705
    }
706
}
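
/*
 * Note on the pointer tagging used in these list walks (explanatory sketch):
 * tb->page_next[] and tb->jmp_first/jmp_next[] store a TranslationBlock
 * pointer with a small tag in its two low bits, which is safe because TBs
 * are sufficiently aligned.  A tag of 0 or 1 selects which of the TB's two
 * pages (or jump slots) the link refers to, and a tag of 2 marks the head of
 * the circular jump list, i.e. (long)tb | 2 is the end marker.  Decoding is
 * always: n = (long)tb1 & 3; tb1 = (TranslationBlock *)((long)tb1 & ~3);
 */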
707

    
708
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
709
{
710
    TranslationBlock *tb1, **ptb;
711
    unsigned int n1;
712

    
713
    ptb = &tb->jmp_next[n];
714
    tb1 = *ptb;
715
    if (tb1) {
716
        /* find tb(n) in circular list */
717
        for(;;) {
718
            tb1 = *ptb;
719
            n1 = (long)tb1 & 3;
720
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
721
            if (n1 == n && tb1 == tb)
722
                break;
723
            if (n1 == 2) {
724
                ptb = &tb1->jmp_first;
725
            } else {
726
                ptb = &tb1->jmp_next[n1];
727
            }
728
        }
729
        /* now we can suppress tb(n) from the list */
730
        *ptb = tb->jmp_next[n];
731

    
732
        tb->jmp_next[n] = NULL;
733
    }
734
}
735

    
736
/* reset the jump entry 'n' of a TB so that it is not chained to
737
   another TB */
738
static inline void tb_reset_jump(TranslationBlock *tb, int n)
739
{
740
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
741
}
742

    
743
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
744
{
745
    CPUState *env;
746
    PageDesc *p;
747
    unsigned int h, n1;
748
    target_phys_addr_t phys_pc;
749
    TranslationBlock *tb1, *tb2;
750

    
751
    /* remove the TB from the hash list */
752
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
753
    h = tb_phys_hash_func(phys_pc);
754
    tb_remove(&tb_phys_hash[h], tb,
755
              offsetof(TranslationBlock, phys_hash_next));
756

    
757
    /* remove the TB from the page list */
758
    if (tb->page_addr[0] != page_addr) {
759
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
760
        tb_page_remove(&p->first_tb, tb);
761
        invalidate_page_bitmap(p);
762
    }
763
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
764
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
765
        tb_page_remove(&p->first_tb, tb);
766
        invalidate_page_bitmap(p);
767
    }
768

    
769
    tb_invalidated_flag = 1;
770

    
771
    /* remove the TB from the hash list */
772
    h = tb_jmp_cache_hash_func(tb->pc);
773
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
774
        if (env->tb_jmp_cache[h] == tb)
775
            env->tb_jmp_cache[h] = NULL;
776
    }
777

    
778
    /* suppress this TB from the two jump lists */
779
    tb_jmp_remove(tb, 0);
780
    tb_jmp_remove(tb, 1);
781

    
782
    /* suppress any remaining jumps to this TB */
783
    tb1 = tb->jmp_first;
784
    for(;;) {
785
        n1 = (long)tb1 & 3;
786
        if (n1 == 2)
787
            break;
788
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
789
        tb2 = tb1->jmp_next[n1];
790
        tb_reset_jump(tb1, n1);
791
        tb1->jmp_next[n1] = NULL;
792
        tb1 = tb2;
793
    }
794
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
795

    
796
    tb_phys_invalidate_count++;
797
}
798

    
799
static inline void set_bits(uint8_t *tab, int start, int len)
800
{
801
    int end, mask, end1;
802

    
803
    end = start + len;
804
    tab += start >> 3;
805
    mask = 0xff << (start & 7);
806
    if ((start & ~7) == (end & ~7)) {
807
        if (start < end) {
808
            mask &= ~(0xff << (end & 7));
809
            *tab |= mask;
810
        }
811
    } else {
812
        *tab++ |= mask;
813
        start = (start + 8) & ~7;
814
        end1 = end & ~7;
815
        while (start < end1) {
816
            *tab++ = 0xff;
817
            start += 8;
818
        }
819
        if (start < end) {
820
            mask = ~(0xff << (end & 7));
821
            *tab |= mask;
822
        }
823
    }
824
}
825

    
826
static void build_page_bitmap(PageDesc *p)
827
{
828
    int n, tb_start, tb_end;
829
    TranslationBlock *tb;
830

    
831
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
832

    
833
    tb = p->first_tb;
834
    while (tb != NULL) {
835
        n = (long)tb & 3;
836
        tb = (TranslationBlock *)((long)tb & ~3);
837
        /* NOTE: this is subtle as a TB may span two physical pages */
838
        if (n == 0) {
839
            /* NOTE: tb_end may be after the end of the page, but
840
               it is not a problem */
841
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
842
            tb_end = tb_start + tb->size;
843
            if (tb_end > TARGET_PAGE_SIZE)
844
                tb_end = TARGET_PAGE_SIZE;
845
        } else {
846
            tb_start = 0;
847
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
848
        }
849
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
850
        tb = tb->page_next[n];
851
    }
852
}
853

    
854
TranslationBlock *tb_gen_code(CPUState *env,
855
                              target_ulong pc, target_ulong cs_base,
856
                              int flags, int cflags)
857
{
858
    TranslationBlock *tb;
859
    uint8_t *tc_ptr;
860
    target_ulong phys_pc, phys_page2, virt_page2;
861
    int code_gen_size;
862

    
863
    phys_pc = get_phys_addr_code(env, pc);
864
    tb = tb_alloc(pc);
865
    if (!tb) {
866
        /* flush must be done */
867
        tb_flush(env);
868
        /* cannot fail at this point */
869
        tb = tb_alloc(pc);
870
        /* Don't forget to invalidate previous TB info.  */
871
        tb_invalidated_flag = 1;
872
    }
873
    tc_ptr = code_gen_ptr;
874
    tb->tc_ptr = tc_ptr;
875
    tb->cs_base = cs_base;
876
    tb->flags = flags;
877
    tb->cflags = cflags;
878
    cpu_gen_code(env, tb, &code_gen_size);
879
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
880

    
881
    /* check next page if needed */
882
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
883
    phys_page2 = -1;
884
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
885
        phys_page2 = get_phys_addr_code(env, virt_page2);
886
    }
887
    tb_link_phys(tb, phys_pc, phys_page2);
888
    return tb;
889
}
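
/*
 * Illustrative note (assuming 4 KiB target pages): the "check next page"
 * logic above links a TB whose guest code crosses a page boundary into both
 * physical pages.  E.g. pc = 0xffc with size 0x10 ends at 0x100b, so
 * virt_page2 = 0x1000 differs from pc & TARGET_PAGE_MASK and the TB is
 * registered on page 0x0000 as well as page 0x1000; a write to either page
 * will then invalidate it.
 */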
890

    
891
/* invalidate all TBs which intersect with the target physical page
892
   starting in range [start;end[. NOTE: start and end must refer to
893
   the same physical page. 'is_cpu_write_access' should be true if called
894
   from a real cpu write access: the virtual CPU will exit the current
895
   TB if code is modified inside this TB. */
896
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
897
                                   int is_cpu_write_access)
898
{
899
    TranslationBlock *tb, *tb_next, *saved_tb;
900
    CPUState *env = cpu_single_env;
901
    target_ulong tb_start, tb_end;
902
    PageDesc *p;
903
    int n;
904
#ifdef TARGET_HAS_PRECISE_SMC
905
    int current_tb_not_found = is_cpu_write_access;
906
    TranslationBlock *current_tb = NULL;
907
    int current_tb_modified = 0;
908
    target_ulong current_pc = 0;
909
    target_ulong current_cs_base = 0;
910
    int current_flags = 0;
911
#endif /* TARGET_HAS_PRECISE_SMC */
912

    
913
    p = page_find(start >> TARGET_PAGE_BITS);
914
    if (!p)
915
        return;
916
    if (!p->code_bitmap &&
917
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
918
        is_cpu_write_access) {
919
        /* build code bitmap */
920
        build_page_bitmap(p);
921
    }
922

    
923
    /* we remove all the TBs in the range [start, end[ */
924
    /* XXX: see if in some cases it could be faster to invalidate all the code */
925
    tb = p->first_tb;
926
    while (tb != NULL) {
927
        n = (long)tb & 3;
928
        tb = (TranslationBlock *)((long)tb & ~3);
929
        tb_next = tb->page_next[n];
930
        /* NOTE: this is subtle as a TB may span two physical pages */
931
        if (n == 0) {
932
            /* NOTE: tb_end may be after the end of the page, but
933
               it is not a problem */
934
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
935
            tb_end = tb_start + tb->size;
936
        } else {
937
            tb_start = tb->page_addr[1];
938
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
939
        }
940
        if (!(tb_end <= start || tb_start >= end)) {
941
#ifdef TARGET_HAS_PRECISE_SMC
942
            if (current_tb_not_found) {
943
                current_tb_not_found = 0;
944
                current_tb = NULL;
945
                if (env->mem_io_pc) {
946
                    /* now we have a real cpu fault */
947
                    current_tb = tb_find_pc(env->mem_io_pc);
948
                }
949
            }
950
            if (current_tb == tb &&
951
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
952
                /* If we are modifying the current TB, we must stop
953
                its execution. We could be more precise by checking
954
                that the modification is after the current PC, but it
955
                would require a specialized function to partially
956
                restore the CPU state */
957

    
958
                current_tb_modified = 1;
959
                cpu_restore_state(current_tb, env,
960
                                  env->mem_io_pc, NULL);
961
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
962
                                     &current_flags);
963
            }
964
#endif /* TARGET_HAS_PRECISE_SMC */
965
            /* we need to do that to handle the case where a signal
966
               occurs while doing tb_phys_invalidate() */
967
            saved_tb = NULL;
968
            if (env) {
969
                saved_tb = env->current_tb;
970
                env->current_tb = NULL;
971
            }
972
            tb_phys_invalidate(tb, -1);
973
            if (env) {
974
                env->current_tb = saved_tb;
975
                if (env->interrupt_request && env->current_tb)
976
                    cpu_interrupt(env, env->interrupt_request);
977
            }
978
        }
979
        tb = tb_next;
980
    }
981
#if !defined(CONFIG_USER_ONLY)
982
    /* if no code remaining, no need to continue to use slow writes */
983
    if (!p->first_tb) {
984
        invalidate_page_bitmap(p);
985
        if (is_cpu_write_access) {
986
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
987
        }
988
    }
989
#endif
990
#ifdef TARGET_HAS_PRECISE_SMC
991
    if (current_tb_modified) {
992
        /* we generate a block containing just the instruction
993
           modifying the memory. It will ensure that it cannot modify
994
           itself */
995
        env->current_tb = NULL;
996
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
997
        cpu_resume_from_signal(env, NULL);
998
    }
999
#endif
1000
}
1001

    
1002
/* len must be <= 8 and start must be a multiple of len */
1003
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1004
{
1005
    PageDesc *p;
1006
    int offset, b;
1007
#if 0
1008
    if (1) {
1009
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1010
                  cpu_single_env->mem_io_vaddr, len,
1011
                  cpu_single_env->eip,
1012
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1013
    }
1014
#endif
1015
    p = page_find(start >> TARGET_PAGE_BITS);
1016
    if (!p)
1017
        return;
1018
    if (p->code_bitmap) {
1019
        offset = start & ~TARGET_PAGE_MASK;
1020
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1021
        if (b & ((1 << len) - 1))
1022
            goto do_invalidate;
1023
    } else {
1024
    do_invalidate:
1025
        tb_invalidate_phys_page_range(start, start + len, 1);
1026
    }
1027
}
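
/*
 * Worked example (illustrative, assuming TARGET_PAGE_SIZE = 4096): for a
 * 4-byte write at offset 0x123 inside the page, offset >> 3 = 0x24 and
 * offset & 7 = 3, so b = code_bitmap[0x24] >> 3 and the slow
 * tb_invalidate_phys_page_range() path is taken only if one of bitmap bits
 * 0x123..0x126 is set, i.e. only if translated code was generated from the
 * bytes being overwritten.
 */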
1028

    
1029
#if !defined(CONFIG_SOFTMMU)
1030
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1031
                                    unsigned long pc, void *puc)
1032
{
1033
    TranslationBlock *tb;
1034
    PageDesc *p;
1035
    int n;
1036
#ifdef TARGET_HAS_PRECISE_SMC
1037
    TranslationBlock *current_tb = NULL;
1038
    CPUState *env = cpu_single_env;
1039
    int current_tb_modified = 0;
1040
    target_ulong current_pc = 0;
1041
    target_ulong current_cs_base = 0;
1042
    int current_flags = 0;
1043
#endif
1044

    
1045
    addr &= TARGET_PAGE_MASK;
1046
    p = page_find(addr >> TARGET_PAGE_BITS);
1047
    if (!p)
1048
        return;
1049
    tb = p->first_tb;
1050
#ifdef TARGET_HAS_PRECISE_SMC
1051
    if (tb && pc != 0) {
1052
        current_tb = tb_find_pc(pc);
1053
    }
1054
#endif
1055
    while (tb != NULL) {
1056
        n = (long)tb & 3;
1057
        tb = (TranslationBlock *)((long)tb & ~3);
1058
#ifdef TARGET_HAS_PRECISE_SMC
1059
        if (current_tb == tb &&
1060
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1061
                /* If we are modifying the current TB, we must stop
1062
                   its execution. We could be more precise by checking
1063
                   that the modification is after the current PC, but it
1064
                   would require a specialized function to partially
1065
                   restore the CPU state */
1066

    
1067
            current_tb_modified = 1;
1068
            cpu_restore_state(current_tb, env, pc, puc);
1069
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1070
                                 &current_flags);
1071
        }
1072
#endif /* TARGET_HAS_PRECISE_SMC */
1073
        tb_phys_invalidate(tb, addr);
1074
        tb = tb->page_next[n];
1075
    }
1076
    p->first_tb = NULL;
1077
#ifdef TARGET_HAS_PRECISE_SMC
1078
    if (current_tb_modified) {
1079
        /* we generate a block containing just the instruction
1080
           modifying the memory. It will ensure that it cannot modify
1081
           itself */
1082
        env->current_tb = NULL;
1083
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1084
        cpu_resume_from_signal(env, puc);
1085
    }
1086
#endif
1087
}
1088
#endif
1089

    
1090
/* add the tb in the target page and protect it if necessary */
1091
static inline void tb_alloc_page(TranslationBlock *tb,
1092
                                 unsigned int n, target_ulong page_addr)
1093
{
1094
    PageDesc *p;
1095
    TranslationBlock *last_first_tb;
1096

    
1097
    tb->page_addr[n] = page_addr;
1098
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1099
    tb->page_next[n] = p->first_tb;
1100
    last_first_tb = p->first_tb;
1101
    p->first_tb = (TranslationBlock *)((long)tb | n);
1102
    invalidate_page_bitmap(p);
1103

    
1104
#if defined(TARGET_HAS_SMC) || 1
1105

    
1106
#if defined(CONFIG_USER_ONLY)
1107
    if (p->flags & PAGE_WRITE) {
1108
        target_ulong addr;
1109
        PageDesc *p2;
1110
        int prot;
1111

    
1112
        /* force the host page as non writable (writes will have a
1113
           page fault + mprotect overhead) */
1114
        page_addr &= qemu_host_page_mask;
1115
        prot = 0;
1116
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1117
            addr += TARGET_PAGE_SIZE) {
1118

    
1119
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1120
            if (!p2)
1121
                continue;
1122
            prot |= p2->flags;
1123
            p2->flags &= ~PAGE_WRITE;
1124
            page_get_flags(addr);
1125
          }
1126
        mprotect(g2h(page_addr), qemu_host_page_size,
1127
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1128
#ifdef DEBUG_TB_INVALIDATE
1129
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1130
               page_addr);
1131
#endif
1132
    }
1133
#else
1134
    /* if some code is already present, then the pages are already
1135
       protected. So we handle the case where only the first TB is
1136
       allocated in a physical page */
1137
    if (!last_first_tb) {
1138
        tlb_protect_code(page_addr);
1139
    }
1140
#endif
1141

    
1142
#endif /* TARGET_HAS_SMC */
1143
}
1144

    
1145
/* Allocate a new translation block. Flush the translation buffer if
1146
   too many translation blocks or too much generated code. */
1147
TranslationBlock *tb_alloc(target_ulong pc)
1148
{
1149
    TranslationBlock *tb;
1150

    
1151
    if (nb_tbs >= code_gen_max_blocks ||
1152
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1153
        return NULL;
1154
    tb = &tbs[nb_tbs++];
1155
    tb->pc = pc;
1156
    tb->cflags = 0;
1157
    return tb;
1158
}
1159

    
1160
void tb_free(TranslationBlock *tb)
1161
{
1162
    /* In practice this is mostly used for single-use temporary TBs.
1163
       Ignore the hard cases and just back up if this TB happens to
1164
       be the last one generated.  */
1165
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1166
        code_gen_ptr = tb->tc_ptr;
1167
        nb_tbs--;
1168
    }
1169
}
1170

    
1171
/* add a new TB and link it to the physical page tables. phys_page2 is
1172
   (-1) to indicate that only one page contains the TB. */
1173
void tb_link_phys(TranslationBlock *tb,
1174
                  target_ulong phys_pc, target_ulong phys_page2)
1175
{
1176
    unsigned int h;
1177
    TranslationBlock **ptb;
1178

    
1179
    /* Grab the mmap lock to stop another thread invalidating this TB
1180
       before we are done.  */
1181
    mmap_lock();
1182
    /* add in the physical hash table */
1183
    h = tb_phys_hash_func(phys_pc);
1184
    ptb = &tb_phys_hash[h];
1185
    tb->phys_hash_next = *ptb;
1186
    *ptb = tb;
1187

    
1188
    /* add in the page list */
1189
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1190
    if (phys_page2 != -1)
1191
        tb_alloc_page(tb, 1, phys_page2);
1192
    else
1193
        tb->page_addr[1] = -1;
1194

    
1195
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1196
    tb->jmp_next[0] = NULL;
1197
    tb->jmp_next[1] = NULL;
1198

    
1199
    /* init original jump addresses */
1200
    if (tb->tb_next_offset[0] != 0xffff)
1201
        tb_reset_jump(tb, 0);
1202
    if (tb->tb_next_offset[1] != 0xffff)
1203
        tb_reset_jump(tb, 1);
1204

    
1205
#ifdef DEBUG_TB_CHECK
1206
    tb_page_check();
1207
#endif
1208
    mmap_unlock();
1209
}
1210

    
1211
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1212
   tb[1].tc_ptr. Return NULL if not found */
1213
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1214
{
1215
    int m_min, m_max, m;
1216
    unsigned long v;
1217
    TranslationBlock *tb;
1218

    
1219
    if (nb_tbs <= 0)
1220
        return NULL;
1221
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1222
        tc_ptr >= (unsigned long)code_gen_ptr)
1223
        return NULL;
1224
    /* binary search (cf Knuth) */
1225
    m_min = 0;
1226
    m_max = nb_tbs - 1;
1227
    while (m_min <= m_max) {
1228
        m = (m_min + m_max) >> 1;
1229
        tb = &tbs[m];
1230
        v = (unsigned long)tb->tc_ptr;
1231
        if (v == tc_ptr)
1232
            return tb;
1233
        else if (tc_ptr < v) {
1234
            m_max = m - 1;
1235
        } else {
1236
            m_min = m + 1;
1237
        }
1238
    }
1239
    return &tbs[m_max];
1240
}
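
/*
 * Usage sketch (illustrative; the helper name is hypothetical): tbs[]
 * entries are created in increasing tc_ptr order, so the binary search
 * returns the last TB whose generated code starts at or before 'tc_ptr'.
 * This is how a host PC taken from a signal handler is mapped back to a TB.
 */
#if 0
static TranslationBlock *tb_for_host_pc_example(unsigned long host_pc)
{
    /* NULL if host_pc is outside the code generation buffer */
    return tb_find_pc(host_pc);
}
#endif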
1241

    
1242
static void tb_reset_jump_recursive(TranslationBlock *tb);
1243

    
1244
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1245
{
1246
    TranslationBlock *tb1, *tb_next, **ptb;
1247
    unsigned int n1;
1248

    
1249
    tb1 = tb->jmp_next[n];
1250
    if (tb1 != NULL) {
1251
        /* find head of list */
1252
        for(;;) {
1253
            n1 = (long)tb1 & 3;
1254
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1255
            if (n1 == 2)
1256
                break;
1257
            tb1 = tb1->jmp_next[n1];
1258
        }
1259
        /* we are now sure that tb jumps to tb1 */
1260
        tb_next = tb1;
1261

    
1262
        /* remove tb from the jmp_first list */
1263
        ptb = &tb_next->jmp_first;
1264
        for(;;) {
1265
            tb1 = *ptb;
1266
            n1 = (long)tb1 & 3;
1267
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268
            if (n1 == n && tb1 == tb)
1269
                break;
1270
            ptb = &tb1->jmp_next[n1];
1271
        }
1272
        *ptb = tb->jmp_next[n];
1273
        tb->jmp_next[n] = NULL;
1274

    
1275
        /* suppress the jump to next tb in generated code */
1276
        tb_reset_jump(tb, n);
1277

    
1278
        /* suppress jumps in the tb on which we could have jumped */
1279
        tb_reset_jump_recursive(tb_next);
1280
    }
1281
}
1282

    
1283
static void tb_reset_jump_recursive(TranslationBlock *tb)
1284
{
1285
    tb_reset_jump_recursive2(tb, 0);
1286
    tb_reset_jump_recursive2(tb, 1);
1287
}
1288

    
1289
#if defined(TARGET_HAS_ICE)
1290
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1291
{
1292
    target_phys_addr_t addr;
1293
    target_ulong pd;
1294
    ram_addr_t ram_addr;
1295
    PhysPageDesc *p;
1296

    
1297
    addr = cpu_get_phys_page_debug(env, pc);
1298
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1299
    if (!p) {
1300
        pd = IO_MEM_UNASSIGNED;
1301
    } else {
1302
        pd = p->phys_offset;
1303
    }
1304
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1305
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1306
}
1307
#endif
1308

    
1309
/* Add a watchpoint.  */
1310
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1311
                          int flags, CPUWatchpoint **watchpoint)
1312
{
1313
    target_ulong len_mask = ~(len - 1);
1314
    CPUWatchpoint *wp;
1315

    
1316
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1317
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1318
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1319
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1320
        return -EINVAL;
1321
    }
1322
    wp = qemu_malloc(sizeof(*wp));
1323

    
1324
    wp->vaddr = addr;
1325
    wp->len_mask = len_mask;
1326
    wp->flags = flags;
1327

    
1328
    /* keep all GDB-injected watchpoints in front */
1329
    if (flags & BP_GDB)
1330
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1331
    else
1332
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1333

    
1334
    tlb_flush_page(env, addr);
1335

    
1336
    if (watchpoint)
1337
        *watchpoint = wp;
1338
    return 0;
1339
}
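
/*
 * Usage sketch (illustrative; the helper name is hypothetical, BP_MEM_WRITE
 * is assumed to come from cpu-all.h, and error handling is omitted): insert
 * a 4-byte write watchpoint on behalf of the debugger, then remove it by
 * reference.
 */
#if 0
static void watchpoint_usage_example(CPUState *env, target_ulong guest_addr)
{
    CPUWatchpoint *wp;

    if (cpu_watchpoint_insert(env, guest_addr, 4, BP_GDB | BP_MEM_WRITE, &wp) == 0) {
        /* ... run the guest; a write to guest_addr now stops it ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif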
1340

    
1341
/* Remove a specific watchpoint.  */
1342
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1343
                          int flags)
1344
{
1345
    target_ulong len_mask = ~(len - 1);
1346
    CPUWatchpoint *wp;
1347

    
1348
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1349
        if (addr == wp->vaddr && len_mask == wp->len_mask
1350
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1351
            cpu_watchpoint_remove_by_ref(env, wp);
1352
            return 0;
1353
        }
1354
    }
1355
    return -ENOENT;
1356
}
1357

    
1358
/* Remove a specific watchpoint by reference.  */
1359
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1360
{
1361
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1362

    
1363
    tlb_flush_page(env, watchpoint->vaddr);
1364

    
1365
    qemu_free(watchpoint);
1366
}
1367

    
1368
/* Remove all matching watchpoints.  */
1369
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1370
{
1371
    CPUWatchpoint *wp, *next;
1372

    
1373
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1374
        if (wp->flags & mask)
1375
            cpu_watchpoint_remove_by_ref(env, wp);
1376
    }
1377
}
1378

    
1379
/* Add a breakpoint.  */
1380
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1381
                          CPUBreakpoint **breakpoint)
1382
{
1383
#if defined(TARGET_HAS_ICE)
1384
    CPUBreakpoint *bp;
1385

    
1386
    bp = qemu_malloc(sizeof(*bp));
1387

    
1388
    bp->pc = pc;
1389
    bp->flags = flags;
1390

    
1391
    /* keep all GDB-injected breakpoints in front */
1392
    if (flags & BP_GDB)
1393
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1394
    else
1395
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1396

    
1397
    breakpoint_invalidate(env, pc);
1398

    
1399
    if (breakpoint)
1400
        *breakpoint = bp;
1401
    return 0;
1402
#else
1403
    return -ENOSYS;
1404
#endif
1405
}
1406

    
1407
/* Remove a specific breakpoint.  */
1408
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1409
{
1410
#if defined(TARGET_HAS_ICE)
1411
    CPUBreakpoint *bp;
1412

    
1413
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1414
        if (bp->pc == pc && bp->flags == flags) {
1415
            cpu_breakpoint_remove_by_ref(env, bp);
1416
            return 0;
1417
        }
1418
    }
1419
    return -ENOENT;
1420
#else
1421
    return -ENOSYS;
1422
#endif
1423
}
1424

    
1425
/* Remove a specific breakpoint by reference.  */
1426
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1427
{
1428
#if defined(TARGET_HAS_ICE)
1429
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1430

    
1431
    breakpoint_invalidate(env, breakpoint->pc);
1432

    
1433
    qemu_free(breakpoint);
1434
#endif
1435
}
1436

    
1437
/* Remove all matching breakpoints. */
1438
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1439
{
1440
#if defined(TARGET_HAS_ICE)
1441
    CPUBreakpoint *bp, *next;
1442

    
1443
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1444
        if (bp->flags & mask)
1445
            cpu_breakpoint_remove_by_ref(env, bp);
1446
    }
1447
#endif
1448
}
1449

    
1450
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1451
   CPU loop after each instruction */
1452
void cpu_single_step(CPUState *env, int enabled)
1453
{
1454
#if defined(TARGET_HAS_ICE)
1455
    if (env->singlestep_enabled != enabled) {
1456
        env->singlestep_enabled = enabled;
1457
        /* must flush all the translated code to avoid inconsistencies */
1458
        /* XXX: only flush what is necessary */
1459
        tb_flush(env);
1460
    }
1461
#endif
1462
}
1463

    
1464
/* enable or disable low-level logging */
1465
void cpu_set_log(int log_flags)
1466
{
1467
    loglevel = log_flags;
1468
    if (loglevel && !logfile) {
1469
        logfile = fopen(logfilename, log_append ? "a" : "w");
1470
        if (!logfile) {
1471
            perror(logfilename);
1472
            _exit(1);
1473
        }
1474
#if !defined(CONFIG_SOFTMMU)
1475
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1476
        {
1477
            static char logfile_buf[4096];
1478
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1479
        }
1480
#else
1481
        setvbuf(logfile, NULL, _IOLBF, 0);
1482
#endif
1483
        log_append = 1;
1484
    }
1485
    if (!loglevel && logfile) {
1486
        fclose(logfile);
1487
        logfile = NULL;
1488
    }
1489
}
1490

    
1491
void cpu_set_log_filename(const char *filename)
1492
{
1493
    logfilename = strdup(filename);
1494
    if (logfile) {
1495
        fclose(logfile);
1496
        logfile = NULL;
1497
    }
1498
    cpu_set_log(loglevel);
1499
}
1500

    
1501
/* mask must never be zero, except for A20 change call */
1502
void cpu_interrupt(CPUState *env, int mask)
1503
{
1504
#if !defined(USE_NPTL)
1505
    TranslationBlock *tb;
1506
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1507
#endif
1508
    int old_mask;
1509

    
1510
    if (mask & CPU_INTERRUPT_EXIT) {
1511
        env->exit_request = 1;
1512
        mask &= ~CPU_INTERRUPT_EXIT;
1513
    }
1514

    
1515
    old_mask = env->interrupt_request;
1516
    env->interrupt_request |= mask;
1517
#if defined(USE_NPTL)
1518
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1519
       problem and hope the cpu will stop of its own accord.  For userspace
1520
       emulation this often isn't actually as bad as it sounds.  Often
1521
       signals are used primarily to interrupt blocking syscalls.  */
1522
#else
1523
    if (use_icount) {
1524
        env->icount_decr.u16.high = 0xffff;
1525
#ifndef CONFIG_USER_ONLY
1526
        if (!can_do_io(env)
1527
            && (mask & ~old_mask) != 0) {
1528
            cpu_abort(env, "Raised interrupt while not in I/O function");
1529
        }
1530
#endif
1531
    } else {
1532
        tb = env->current_tb;
1533
        /* if the cpu is currently executing code, we must unlink it and
1534
           all the potentially executing TB */
1535
        if (tb && !testandset(&interrupt_lock)) {
1536
            env->current_tb = NULL;
1537
            tb_reset_jump_recursive(tb);
1538
            resetlock(&interrupt_lock);
1539
        }
1540
    }
1541
#endif
1542
}
1543

    
1544
void cpu_reset_interrupt(CPUState *env, int mask)
1545
{
1546
    env->interrupt_request &= ~mask;
1547
}
1548

    
1549
const CPULogItem cpu_log_items[] = {
1550
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1551
      "show generated host assembly code for each compiled TB" },
1552
    { CPU_LOG_TB_IN_ASM, "in_asm",
1553
      "show target assembly code for each compiled TB" },
1554
    { CPU_LOG_TB_OP, "op",
1555
      "show micro ops for each compiled TB" },
1556
    { CPU_LOG_TB_OP_OPT, "op_opt",
1557
      "show micro ops "
1558
#ifdef TARGET_I386
1559
      "before eflags optimization and "
1560
#endif
1561
      "after liveness analysis" },
1562
    { CPU_LOG_INT, "int",
1563
      "show interrupts/exceptions in short format" },
1564
    { CPU_LOG_EXEC, "exec",
1565
      "show trace before each executed TB (lots of logs)" },
1566
    { CPU_LOG_TB_CPU, "cpu",
1567
      "show CPU state before block translation" },
1568
#ifdef TARGET_I386
1569
    { CPU_LOG_PCALL, "pcall",
1570
      "show protected mode far calls/returns/exceptions" },
1571
    { CPU_LOG_RESET, "cpu_reset",
1572
      "show CPU state before CPU resets" },
1573
#endif
1574
#ifdef DEBUG_IOPORT
1575
    { CPU_LOG_IOPORT, "ioport",
1576
      "show all i/o ports accesses" },
1577
#endif
1578
    { 0, NULL, NULL },
1579
};
1580

    
1581
static int cmp1(const char *s1, int n, const char *s2)
1582
{
1583
    if (strlen(s2) != n)
1584
        return 0;
1585
    return memcmp(s1, s2, n) == 0;
1586
}
1587

    
1588
/* takes a comma-separated list of log masks. Returns 0 on error. */
1589
int cpu_str_to_log_mask(const char *str)
1590
{
1591
    const CPULogItem *item;
1592
    int mask;
1593
    const char *p, *p1;
1594

    
1595
    p = str;
1596
    mask = 0;
1597
    for(;;) {
1598
        p1 = strchr(p, ',');
1599
        if (!p1)
1600
            p1 = p + strlen(p);
1601
        if(cmp1(p,p1-p,"all")) {
1602
                for(item = cpu_log_items; item->mask != 0; item++) {
1603
                        mask |= item->mask;
1604
                }
1605
        } else {
1606
        for(item = cpu_log_items; item->mask != 0; item++) {
1607
            if (cmp1(p, p1 - p, item->name))
1608
                goto found;
1609
        }
1610
        return 0;
1611
        }
1612
    found:
1613
        mask |= item->mask;
1614
        if (*p1 != ',')
1615
            break;
1616
        p = p1 + 1;
1617
    }
1618
    return mask;
1619
}
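
/*
 * Usage sketch (illustrative; the helper name is hypothetical): parse a
 * "-d"-style option string.  "in_asm,exec" yields
 * CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, while an unknown item name makes the
 * function return 0 so the caller can report an error.
 */
#if 0
static void set_log_from_option_example(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);
    if (!mask) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
#endif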
1620

    
1621
void cpu_abort(CPUState *env, const char *fmt, ...)
1622
{
1623
    va_list ap;
1624
    va_list ap2;
1625

    
1626
    va_start(ap, fmt);
1627
    va_copy(ap2, ap);
1628
    fprintf(stderr, "qemu: fatal: ");
1629
    vfprintf(stderr, fmt, ap);
1630
    fprintf(stderr, "\n");
1631
#ifdef TARGET_I386
1632
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1633
#else
1634
    cpu_dump_state(env, stderr, fprintf, 0);
1635
#endif
1636
    if (qemu_log_enabled()) {
1637
        qemu_log("qemu: fatal: ");
1638
        qemu_log_vprintf(fmt, ap2);
1639
        qemu_log("\n");
1640
#ifdef TARGET_I386
1641
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1642
#else
1643
        log_cpu_state(env, 0);
1644
#endif
1645
        qemu_log_flush();
1646
        qemu_log_close();
1647
    }
1648
    va_end(ap2);
1649
    va_end(ap);
1650
    abort();
1651
}
1652

    
1653
CPUState *cpu_copy(CPUState *env)
1654
{
1655
    CPUState *new_env = cpu_init(env->cpu_model_str);
1656
    CPUState *next_cpu = new_env->next_cpu;
1657
    int cpu_index = new_env->cpu_index;
1658
#if defined(TARGET_HAS_ICE)
1659
    CPUBreakpoint *bp;
1660
    CPUWatchpoint *wp;
1661
#endif
1662

    
1663
    memcpy(new_env, env, sizeof(CPUState));
1664

    
1665
    /* Preserve chaining and index. */
1666
    new_env->next_cpu = next_cpu;
1667
    new_env->cpu_index = cpu_index;
1668

    
1669
    /* Clone all break/watchpoints.
1670
       Note: Once we support ptrace with hw-debug register access, make sure
1671
       BP_CPU break/watchpoints are handled correctly on clone. */
1672
    TAILQ_INIT(&new_env->breakpoints);
1673
    TAILQ_INIT(&new_env->watchpoints);
1674
#if defined(TARGET_HAS_ICE)
1675
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1676
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1677
    }
1678
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1679
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1680
                              wp->flags, NULL);
1681
    }
1682
#endif
1683

    
1684
    return new_env;
1685
}
1686

    
1687
#if !defined(CONFIG_USER_ONLY)
1688

    
1689
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1690
{
1691
    unsigned int i;
1692

    
1693
    /* Discard jump cache entries for any tb which might potentially
1694
       overlap the flushed page.  */
1695
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1696
    memset (&env->tb_jmp_cache[i], 0, 
1697
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1698

    
1699
    i = tb_jmp_cache_hash_page(addr);
1700
    memset (&env->tb_jmp_cache[i], 0, 
1701
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1702
}
1703

    
1704
/* NOTE: if flush_global is true, also flush global entries (not
1705
   implemented yet) */
1706
void tlb_flush(CPUState *env, int flush_global)
1707
{
1708
    int i;
1709

    
1710
#if defined(DEBUG_TLB)
1711
    printf("tlb_flush:\n");
1712
#endif
1713
    /* must reset current TB so that interrupts cannot modify the
1714
       links while we are modifying them */
1715
    env->current_tb = NULL;
1716

    
1717
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1718
        env->tlb_table[0][i].addr_read = -1;
1719
        env->tlb_table[0][i].addr_write = -1;
1720
        env->tlb_table[0][i].addr_code = -1;
1721
        env->tlb_table[1][i].addr_read = -1;
1722
        env->tlb_table[1][i].addr_write = -1;
1723
        env->tlb_table[1][i].addr_code = -1;
1724
#if (NB_MMU_MODES >= 3)
1725
        env->tlb_table[2][i].addr_read = -1;
1726
        env->tlb_table[2][i].addr_write = -1;
1727
        env->tlb_table[2][i].addr_code = -1;
1728
#if (NB_MMU_MODES == 4)
1729
        env->tlb_table[3][i].addr_read = -1;
1730
        env->tlb_table[3][i].addr_write = -1;
1731
        env->tlb_table[3][i].addr_code = -1;
1732
#endif
1733
#endif
1734
    }
1735

    
1736
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1737

    
1738
#ifdef USE_KQEMU
1739
    if (env->kqemu_enabled) {
1740
        kqemu_flush(env, flush_global);
1741
    }
1742
#endif
1743
    tlb_flush_count++;
1744
}
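
/* Illustrative sketch (hypothetical target code): a full flush is what a
   target requests when its address space changes wholesale, for example
   when the guest switches page tables:

       tlb_flush(env, 1);

   Single-page invalidations instead go through tlb_flush_page() below. */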
1745

    
1746
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1747
{
1748
    if (addr == (tlb_entry->addr_read &
1749
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1750
        addr == (tlb_entry->addr_write &
1751
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1752
        addr == (tlb_entry->addr_code &
1753
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1754
        tlb_entry->addr_read = -1;
1755
        tlb_entry->addr_write = -1;
1756
        tlb_entry->addr_code = -1;
1757
    }
1758
}
1759

    
1760
void tlb_flush_page(CPUState *env, target_ulong addr)
1761
{
1762
    int i;
1763

    
1764
#if defined(DEBUG_TLB)
1765
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1766
#endif
1767
    /* must reset current TB so that interrupts cannot modify the
1768
       links while we are modifying them */
1769
    env->current_tb = NULL;
1770

    
1771
    addr &= TARGET_PAGE_MASK;
1772
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1773
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1774
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1775
#if (NB_MMU_MODES >= 3)
1776
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1777
#if (NB_MMU_MODES == 4)
1778
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1779
#endif
1780
#endif
1781

    
1782
    tlb_flush_jmp_cache(env, addr);
1783

    
1784
#ifdef USE_KQEMU
1785
    if (env->kqemu_enabled) {
1786
        kqemu_flush_page(env, addr);
1787
    }
1788
#endif
1789
}
1790

    
1791
/* update the TLBs so that writes to code in the virtual page 'addr'
1792
   can be detected */
1793
static void tlb_protect_code(ram_addr_t ram_addr)
1794
{
1795
    cpu_physical_memory_reset_dirty(ram_addr,
1796
                                    ram_addr + TARGET_PAGE_SIZE,
1797
                                    CODE_DIRTY_FLAG);
1798
}
1799

    
1800
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1801
   tested for self-modifying code */
1802
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1803
                                    target_ulong vaddr)
1804
{
1805
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1806
}
1807

    
1808
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1809
                                         unsigned long start, unsigned long length)
1810
{
1811
    unsigned long addr;
1812
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1813
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1814
        if ((addr - start) < length) {
1815
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1816
        }
1817
    }
1818
}
1819

    
1820
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1821
                                     int dirty_flags)
1822
{
1823
    CPUState *env;
1824
    unsigned long length, start1;
1825
    int i, mask, len;
1826
    uint8_t *p;
1827

    
1828
    start &= TARGET_PAGE_MASK;
1829
    end = TARGET_PAGE_ALIGN(end);
1830

    
1831
    length = end - start;
1832
    if (length == 0)
1833
        return;
1834
    len = length >> TARGET_PAGE_BITS;
1835
#ifdef USE_KQEMU
1836
    /* XXX: should not depend on cpu context */
1837
    env = first_cpu;
1838
    if (env->kqemu_enabled) {
1839
        ram_addr_t addr;
1840
        addr = start;
1841
        for(i = 0; i < len; i++) {
1842
            kqemu_set_notdirty(env, addr);
1843
            addr += TARGET_PAGE_SIZE;
1844
        }
1845
    }
1846
#endif
1847
    mask = ~dirty_flags;
1848
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1849
    for(i = 0; i < len; i++)
1850
        p[i] &= mask;
1851

    
1852
    /* we modify the TLB cache so that the dirty bit will be set again
1853
       when accessing the range */
1854
    start1 = start + (unsigned long)phys_ram_base;
1855
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1856
        for(i = 0; i < CPU_TLB_SIZE; i++)
1857
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1858
        for(i = 0; i < CPU_TLB_SIZE; i++)
1859
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1860
#if (NB_MMU_MODES >= 3)
1861
        for(i = 0; i < CPU_TLB_SIZE; i++)
1862
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1863
#if (NB_MMU_MODES == 4)
1864
        for(i = 0; i < CPU_TLB_SIZE; i++)
1865
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1866
#endif
1867
#endif
1868
    }
1869
}
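
/* Illustrative sketch (hypothetical display code, not from this file):
   consumers of the dirty bitmap typically test a page and then clear its
   flag for the next pass, roughly

       if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
           ... redraw whatever is backed by this page ...
       }
       cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                       VGA_DIRTY_FLAG);

   addr is a placeholder for the page being scanned. */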
1870

    
1871
int cpu_physical_memory_set_dirty_tracking(int enable)
1872
{
1873
    in_migration = enable;
1874
    return 0;
1875
}
1876

    
1877
int cpu_physical_memory_get_dirty_tracking(void)
1878
{
1879
    return in_migration;
1880
}
1881

    
1882
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1883
{
1884
    if (kvm_enabled())
1885
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1886
}
1887

    
1888
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1889
{
1890
    ram_addr_t ram_addr;
1891

    
1892
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1893
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1894
            tlb_entry->addend - (unsigned long)phys_ram_base;
1895
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1896
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1897
        }
1898
    }
1899
}
1900

    
1901
/* update the TLB according to the current state of the dirty bits */
1902
void cpu_tlb_update_dirty(CPUState *env)
1903
{
1904
    int i;
1905
    for(i = 0; i < CPU_TLB_SIZE; i++)
1906
        tlb_update_dirty(&env->tlb_table[0][i]);
1907
    for(i = 0; i < CPU_TLB_SIZE; i++)
1908
        tlb_update_dirty(&env->tlb_table[1][i]);
1909
#if (NB_MMU_MODES >= 3)
1910
    for(i = 0; i < CPU_TLB_SIZE; i++)
1911
        tlb_update_dirty(&env->tlb_table[2][i]);
1912
#if (NB_MMU_MODES == 4)
1913
    for(i = 0; i < CPU_TLB_SIZE; i++)
1914
        tlb_update_dirty(&env->tlb_table[3][i]);
1915
#endif
1916
#endif
1917
}
1918

    
1919
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1920
{
1921
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1922
        tlb_entry->addr_write = vaddr;
1923
}
1924

    
1925
/* update the TLB corresponding to virtual page vaddr
1926
   so that it is no longer dirty */
1927
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1928
{
1929
    int i;
1930

    
1931
    vaddr &= TARGET_PAGE_MASK;
1932
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1933
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1934
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1935
#if (NB_MMU_MODES >= 3)
1936
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1937
#if (NB_MMU_MODES == 4)
1938
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1939
#endif
1940
#endif
1941
}
1942

    
1943
/* add a new TLB entry. At most one entry for a given virtual address
1944
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1945
   (can only happen in non-SOFTMMU mode for I/O pages or pages
1946
   conflicting with the host address space). */
1947
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1948
                      target_phys_addr_t paddr, int prot,
1949
                      int mmu_idx, int is_softmmu)
1950
{
1951
    PhysPageDesc *p;
1952
    unsigned long pd;
1953
    unsigned int index;
1954
    target_ulong address;
1955
    target_ulong code_address;
1956
    target_phys_addr_t addend;
1957
    int ret;
1958
    CPUTLBEntry *te;
1959
    CPUWatchpoint *wp;
1960
    target_phys_addr_t iotlb;
1961

    
1962
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1963
    if (!p) {
1964
        pd = IO_MEM_UNASSIGNED;
1965
    } else {
1966
        pd = p->phys_offset;
1967
    }
1968
#if defined(DEBUG_TLB)
1969
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1970
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1971
#endif
1972

    
1973
    ret = 0;
1974
    address = vaddr;
1975
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1976
        /* IO memory case (romd handled later) */
1977
        address |= TLB_MMIO;
1978
    }
1979
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1980
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1981
        /* Normal RAM.  */
1982
        iotlb = pd & TARGET_PAGE_MASK;
1983
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1984
            iotlb |= IO_MEM_NOTDIRTY;
1985
        else
1986
            iotlb |= IO_MEM_ROM;
1987
    } else {
1988
        /* IO handlers are currently passed a physical address.
1989
           It would be nice to pass an offset from the base address
1990
           of that region.  This would avoid having to special case RAM,
1991
           and avoid full address decoding in every device.
1992
           We can't use the high bits of pd for this because
1993
           IO_MEM_ROMD uses these as a ram address.  */
1994
        iotlb = (pd & ~TARGET_PAGE_MASK);
1995
        if (p) {
1996
            iotlb += p->region_offset;
1997
        } else {
1998
            iotlb += paddr;
1999
        }
2000
    }
2001

    
2002
    code_address = address;
2003
    /* Make accesses to pages with watchpoints go via the
2004
       watchpoint trap routines.  */
2005
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2006
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2007
            iotlb = io_mem_watch + paddr;
2008
            /* TODO: The memory case can be optimized by not trapping
2009
               reads of pages with a write breakpoint.  */
2010
            address |= TLB_MMIO;
2011
        }
2012
    }
2013

    
2014
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2015
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2016
    te = &env->tlb_table[mmu_idx][index];
2017
    te->addend = addend - vaddr;
2018
    if (prot & PAGE_READ) {
2019
        te->addr_read = address;
2020
    } else {
2021
        te->addr_read = -1;
2022
    }
2023

    
2024
    if (prot & PAGE_EXEC) {
2025
        te->addr_code = code_address;
2026
    } else {
2027
        te->addr_code = -1;
2028
    }
2029
    if (prot & PAGE_WRITE) {
2030
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2031
            (pd & IO_MEM_ROMD)) {
2032
            /* Write access calls the I/O callback.  */
2033
            te->addr_write = address | TLB_MMIO;
2034
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2035
                   !cpu_physical_memory_is_dirty(pd)) {
2036
            te->addr_write = address | TLB_NOTDIRTY;
2037
        } else {
2038
            te->addr_write = address;
2039
        }
2040
    } else {
2041
        te->addr_write = -1;
2042
    }
2043
    return ret;
2044
}
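
/* Illustrative sketch (hypothetical target code, not from this file): a
   target's tlb_fill()/MMU fault handler resolves the guest virtual address
   to a physical address and protection bits, then installs the mapping
   with something like

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                         mmu_idx, is_softmmu);

   so that the next access to the page hits the softmmu TLB directly. */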
2045

    
2046
#else
2047

    
2048
void tlb_flush(CPUState *env, int flush_global)
2049
{
2050
}
2051

    
2052
void tlb_flush_page(CPUState *env, target_ulong addr)
2053
{
2054
}
2055

    
2056
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2057
                      target_phys_addr_t paddr, int prot,
2058
                      int mmu_idx, int is_softmmu)
2059
{
2060
    return 0;
2061
}
2062

    
2063
/* dump memory mappings */
2064
void page_dump(FILE *f)
2065
{
2066
    unsigned long start, end;
2067
    int i, j, prot, prot1;
2068
    PageDesc *p;
2069

    
2070
    fprintf(f, "%-8s %-8s %-8s %s\n",
2071
            "start", "end", "size", "prot");
2072
    start = -1;
2073
    end = -1;
2074
    prot = 0;
2075
    for(i = 0; i <= L1_SIZE; i++) {
2076
        if (i < L1_SIZE)
2077
            p = l1_map[i];
2078
        else
2079
            p = NULL;
2080
        for(j = 0;j < L2_SIZE; j++) {
2081
            if (!p)
2082
                prot1 = 0;
2083
            else
2084
                prot1 = p[j].flags;
2085
            if (prot1 != prot) {
2086
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2087
                if (start != -1) {
2088
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2089
                            start, end, end - start,
2090
                            prot & PAGE_READ ? 'r' : '-',
2091
                            prot & PAGE_WRITE ? 'w' : '-',
2092
                            prot & PAGE_EXEC ? 'x' : '-');
2093
                }
2094
                if (prot1 != 0)
2095
                    start = end;
2096
                else
2097
                    start = -1;
2098
                prot = prot1;
2099
            }
2100
            if (!p)
2101
                break;
2102
        }
2103
    }
2104
}
2105

    
2106
int page_get_flags(target_ulong address)
2107
{
2108
    PageDesc *p;
2109

    
2110
    p = page_find(address >> TARGET_PAGE_BITS);
2111
    if (!p)
2112
        return 0;
2113
    return p->flags;
2114
}
2115

    
2116
/* modify the flags of a page and invalidate the code if
2117
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2118
   depending on PAGE_WRITE */
2119
void page_set_flags(target_ulong start, target_ulong end, int flags)
2120
{
2121
    PageDesc *p;
2122
    target_ulong addr;
2123

    
2124
    /* mmap_lock should already be held.  */
2125
    start = start & TARGET_PAGE_MASK;
2126
    end = TARGET_PAGE_ALIGN(end);
2127
    if (flags & PAGE_WRITE)
2128
        flags |= PAGE_WRITE_ORG;
2129
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2130
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2131
        /* We may be called for host regions that are outside guest
2132
           address space.  */
2133
        if (!p)
2134
            return;
2135
        /* if the write protection is set, then we invalidate the code
2136
           inside */
2137
        if (!(p->flags & PAGE_WRITE) &&
2138
            (flags & PAGE_WRITE) &&
2139
            p->first_tb) {
2140
            tb_invalidate_phys_page(addr, 0, NULL);
2141
        }
2142
        p->flags = flags;
2143
    }
2144
}
2145

    
2146
int page_check_range(target_ulong start, target_ulong len, int flags)
2147
{
2148
    PageDesc *p;
2149
    target_ulong end;
2150
    target_ulong addr;
2151

    
2152
    if (start + len < start)
2153
        /* we've wrapped around */
2154
        return -1;
2155

    
2156
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2157
    start = start & TARGET_PAGE_MASK;
2158

    
2159
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2160
        p = page_find(addr >> TARGET_PAGE_BITS);
2161
        if( !p )
2162
            return -1;
2163
        if( !(p->flags & PAGE_VALID) )
2164
            return -1;
2165

    
2166
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2167
            return -1;
2168
        if (flags & PAGE_WRITE) {
2169
            if (!(p->flags & PAGE_WRITE_ORG))
2170
                return -1;
2171
            /* unprotect the page if it was put read-only because it
2172
               contains translated code */
2173
            if (!(p->flags & PAGE_WRITE)) {
2174
                if (!page_unprotect(addr, 0, NULL))
2175
                    return -1;
2176
            }
2177
            return 0;
2178
        }
2179
    }
2180
    return 0;
2181
}
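
/* Illustrative sketch (hypothetical user-mode caller, not from this file):
   the mmap emulation records a freshly mapped guest range, and syscall
   code later validates guest buffers, roughly as

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

       if (page_check_range(guest_addr, size, PAGE_READ) != 0)
           return -TARGET_EFAULT;

   guest_addr and size are placeholders for the buffer being checked. */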
2182

    
2183
/* called from signal handler: invalidate the code and unprotect the
2184
   page. Return TRUE if the fault was successfully handled. */
2185
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2186
{
2187
    unsigned int page_index, prot, pindex;
2188
    PageDesc *p, *p1;
2189
    target_ulong host_start, host_end, addr;
2190

    
2191
    /* Technically this isn't safe inside a signal handler.  However we
2192
       know this only ever happens in a synchronous SEGV handler, so in
2193
       practice it seems to be ok.  */
2194
    mmap_lock();
2195

    
2196
    host_start = address & qemu_host_page_mask;
2197
    page_index = host_start >> TARGET_PAGE_BITS;
2198
    p1 = page_find(page_index);
2199
    if (!p1) {
2200
        mmap_unlock();
2201
        return 0;
2202
    }
2203
    host_end = host_start + qemu_host_page_size;
2204
    p = p1;
2205
    prot = 0;
2206
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2207
        prot |= p->flags;
2208
        p++;
2209
    }
2210
    /* if the page was really writable, then we change its
2211
       protection back to writable */
2212
    if (prot & PAGE_WRITE_ORG) {
2213
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2214
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2215
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2216
                     (prot & PAGE_BITS) | PAGE_WRITE);
2217
            p1[pindex].flags |= PAGE_WRITE;
2218
            /* and since the content will be modified, we must invalidate
2219
               the corresponding translated code. */
2220
            tb_invalidate_phys_page(address, pc, puc);
2221
#ifdef DEBUG_TB_CHECK
2222
            tb_invalidate_check(address);
2223
#endif
2224
            mmap_unlock();
2225
            return 1;
2226
        }
2227
    }
2228
    mmap_unlock();
2229
    return 0;
2230
}
2231

    
2232
static inline void tlb_set_dirty(CPUState *env,
2233
                                 unsigned long addr, target_ulong vaddr)
2234
{
2235
}
2236
#endif /* defined(CONFIG_USER_ONLY) */
2237

    
2238
#if !defined(CONFIG_USER_ONLY)
2239

    
2240
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2241
                             ram_addr_t memory, ram_addr_t region_offset);
2242
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2243
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2244
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2245
                      need_subpage)                                     \
2246
    do {                                                                \
2247
        if (addr > start_addr)                                          \
2248
            start_addr2 = 0;                                            \
2249
        else {                                                          \
2250
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2251
            if (start_addr2 > 0)                                        \
2252
                need_subpage = 1;                                       \
2253
        }                                                               \
2254
                                                                        \
2255
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2256
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2257
        else {                                                          \
2258
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2259
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2260
                need_subpage = 1;                                       \
2261
        }                                                               \
2262
    } while (0)
2263

    
2264
/* register physical memory. 'size' must be a multiple of the target
2265
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2266
   io memory page.  The address used when calling the IO function is
2267
   the offset from the start of the region, plus region_offset.  Both
2268
   start_addr and region_offset are rounded down to a page boundary
2269
   before calculating this offset.  This should not be a problem unless
2270
   the low bits of start_addr and region_offset differ.  */
2271
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2272
                                         ram_addr_t size,
2273
                                         ram_addr_t phys_offset,
2274
                                         ram_addr_t region_offset)
2275
{
2276
    target_phys_addr_t addr, end_addr;
2277
    PhysPageDesc *p;
2278
    CPUState *env;
2279
    ram_addr_t orig_size = size;
2280
    void *subpage;
2281

    
2282
#ifdef USE_KQEMU
2283
    /* XXX: should not depend on cpu context */
2284
    env = first_cpu;
2285
    if (env->kqemu_enabled) {
2286
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2287
    }
2288
#endif
2289
    if (kvm_enabled())
2290
        kvm_set_phys_mem(start_addr, size, phys_offset);
2291

    
2292
    if (phys_offset == IO_MEM_UNASSIGNED) {
2293
        region_offset = start_addr;
2294
    }
2295
    region_offset &= TARGET_PAGE_MASK;
2296
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2297
    end_addr = start_addr + (target_phys_addr_t)size;
2298
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2299
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2300
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2301
            ram_addr_t orig_memory = p->phys_offset;
2302
            target_phys_addr_t start_addr2, end_addr2;
2303
            int need_subpage = 0;
2304

    
2305
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2306
                          need_subpage);
2307
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2308
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2309
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2310
                                           &p->phys_offset, orig_memory,
2311
                                           p->region_offset);
2312
                } else {
2313
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2314
                                            >> IO_MEM_SHIFT];
2315
                }
2316
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2317
                                 region_offset);
2318
                p->region_offset = 0;
2319
            } else {
2320
                p->phys_offset = phys_offset;
2321
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2322
                    (phys_offset & IO_MEM_ROMD))
2323
                    phys_offset += TARGET_PAGE_SIZE;
2324
            }
2325
        } else {
2326
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2327
            p->phys_offset = phys_offset;
2328
            p->region_offset = region_offset;
2329
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2330
                (phys_offset & IO_MEM_ROMD)) {
2331
                phys_offset += TARGET_PAGE_SIZE;
2332
            } else {
2333
                target_phys_addr_t start_addr2, end_addr2;
2334
                int need_subpage = 0;
2335

    
2336
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2337
                              end_addr2, need_subpage);
2338

    
2339
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2340
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2341
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2342
                                           addr & TARGET_PAGE_MASK);
2343
                    subpage_register(subpage, start_addr2, end_addr2,
2344
                                     phys_offset, region_offset);
2345
                    p->region_offset = 0;
2346
                }
2347
            }
2348
        }
2349
        region_offset += TARGET_PAGE_SIZE;
2350
    }
2351

    
2352
    /* since each CPU stores ram addresses in its TLB cache, we must
2353
       reset the modified entries */
2354
    /* XXX: slow ! */
2355
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2356
        tlb_flush(env, 1);
2357
    }
2358
}
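
/* Illustrative sketch (hypothetical board code, not from this file): a
   machine init function usually allocates guest RAM and then registers it,
   and maps device MMIO through the same API, e.g.

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_off | IO_MEM_RAM);

       int io = cpu_register_io_memory(0, dev_read, dev_write, dev_state);
       cpu_register_physical_memory(0x10000000, 0x1000, io);

   dev_read/dev_write/dev_state and the addresses are placeholders.
   Sub-page sized or misaligned registrations fall into the subpage_*
   machinery defined below. */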
2359

    
2360
/* XXX: temporary until new memory mapping API */
2361
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2362
{
2363
    PhysPageDesc *p;
2364

    
2365
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2366
    if (!p)
2367
        return IO_MEM_UNASSIGNED;
2368
    return p->phys_offset;
2369
}
2370

    
2371
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2372
{
2373
    if (kvm_enabled())
2374
        kvm_coalesce_mmio_region(addr, size);
2375
}
2376

    
2377
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2378
{
2379
    if (kvm_enabled())
2380
        kvm_uncoalesce_mmio_region(addr, size);
2381
}
2382

    
2383
/* XXX: better than nothing */
2384
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2385
{
2386
    ram_addr_t addr;
2387
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2388
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2389
                (uint64_t)size, (uint64_t)phys_ram_size);
2390
        abort();
2391
    }
2392
    addr = phys_ram_alloc_offset;
2393
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2394
    return addr;
2395
}
2396

    
2397
void qemu_ram_free(ram_addr_t addr)
2398
{
2399
}
2400

    
2401
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2402
{
2403
#ifdef DEBUG_UNASSIGNED
2404
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2405
#endif
2406
#if defined(TARGET_SPARC)
2407
    do_unassigned_access(addr, 0, 0, 0, 1);
2408
#endif
2409
    return 0;
2410
}
2411

    
2412
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2413
{
2414
#ifdef DEBUG_UNASSIGNED
2415
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2416
#endif
2417
#if defined(TARGET_SPARC)
2418
    do_unassigned_access(addr, 0, 0, 0, 2);
2419
#endif
2420
    return 0;
2421
}
2422

    
2423
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2424
{
2425
#ifdef DEBUG_UNASSIGNED
2426
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2427
#endif
2428
#if defined(TARGET_SPARC)
2429
    do_unassigned_access(addr, 0, 0, 0, 4);
2430
#endif
2431
    return 0;
2432
}
2433

    
2434
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2435
{
2436
#ifdef DEBUG_UNASSIGNED
2437
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2438
#endif
2439
#if defined(TARGET_SPARC)
2440
    do_unassigned_access(addr, 1, 0, 0, 1);
2441
#endif
2442
}
2443

    
2444
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2445
{
2446
#ifdef DEBUG_UNASSIGNED
2447
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2448
#endif
2449
#if defined(TARGET_SPARC)
2450
    do_unassigned_access(addr, 1, 0, 0, 2);
2451
#endif
2452
}
2453

    
2454
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2455
{
2456
#ifdef DEBUG_UNASSIGNED
2457
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2458
#endif
2459
#if defined(TARGET_SPARC)
2460
    do_unassigned_access(addr, 1, 0, 0, 4);
2461
#endif
2462
}
2463

    
2464
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2465
    unassigned_mem_readb,
2466
    unassigned_mem_readw,
2467
    unassigned_mem_readl,
2468
};
2469

    
2470
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2471
    unassigned_mem_writeb,
2472
    unassigned_mem_writew,
2473
    unassigned_mem_writel,
2474
};
2475

    
2476
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2477
                                uint32_t val)
2478
{
2479
    int dirty_flags;
2480
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2481
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2482
#if !defined(CONFIG_USER_ONLY)
2483
        tb_invalidate_phys_page_fast(ram_addr, 1);
2484
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2485
#endif
2486
    }
2487
    stb_p(phys_ram_base + ram_addr, val);
2488
#ifdef USE_KQEMU
2489
    if (cpu_single_env->kqemu_enabled &&
2490
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2491
        kqemu_modify_page(cpu_single_env, ram_addr);
2492
#endif
2493
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2494
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2495
    /* we remove the notdirty callback only if the code has been
2496
       flushed */
2497
    if (dirty_flags == 0xff)
2498
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2499
}
2500

    
2501
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2502
                                uint32_t val)
2503
{
2504
    int dirty_flags;
2505
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2506
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2507
#if !defined(CONFIG_USER_ONLY)
2508
        tb_invalidate_phys_page_fast(ram_addr, 2);
2509
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2510
#endif
2511
    }
2512
    stw_p(phys_ram_base + ram_addr, val);
2513
#ifdef USE_KQEMU
2514
    if (cpu_single_env->kqemu_enabled &&
2515
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2516
        kqemu_modify_page(cpu_single_env, ram_addr);
2517
#endif
2518
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2519
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2520
    /* we remove the notdirty callback only if the code has been
2521
       flushed */
2522
    if (dirty_flags == 0xff)
2523
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2524
}
2525

    
2526
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2527
                                uint32_t val)
2528
{
2529
    int dirty_flags;
2530
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2531
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2532
#if !defined(CONFIG_USER_ONLY)
2533
        tb_invalidate_phys_page_fast(ram_addr, 4);
2534
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2535
#endif
2536
    }
2537
    stl_p(phys_ram_base + ram_addr, val);
2538
#ifdef USE_KQEMU
2539
    if (cpu_single_env->kqemu_enabled &&
2540
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2541
        kqemu_modify_page(cpu_single_env, ram_addr);
2542
#endif
2543
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2544
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2545
    /* we remove the notdirty callback only if the code has been
2546
       flushed */
2547
    if (dirty_flags == 0xff)
2548
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2549
}
2550

    
2551
static CPUReadMemoryFunc *error_mem_read[3] = {
2552
    NULL, /* never used */
2553
    NULL, /* never used */
2554
    NULL, /* never used */
2555
};
2556

    
2557
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2558
    notdirty_mem_writeb,
2559
    notdirty_mem_writew,
2560
    notdirty_mem_writel,
2561
};
2562

    
2563
/* Generate a debug exception if a watchpoint has been hit.  */
2564
static void check_watchpoint(int offset, int len_mask, int flags)
2565
{
2566
    CPUState *env = cpu_single_env;
2567
    target_ulong pc, cs_base;
2568
    TranslationBlock *tb;
2569
    target_ulong vaddr;
2570
    CPUWatchpoint *wp;
2571
    int cpu_flags;
2572

    
2573
    if (env->watchpoint_hit) {
2574
        /* We re-entered the check after replacing the TB. Now raise
2575
         * the debug interrupt so that it will trigger after the
2576
         * current instruction. */
2577
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2578
        return;
2579
    }
2580
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2581
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2582
        if ((vaddr == (wp->vaddr & len_mask) ||
2583
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2584
            wp->flags |= BP_WATCHPOINT_HIT;
2585
            if (!env->watchpoint_hit) {
2586
                env->watchpoint_hit = wp;
2587
                tb = tb_find_pc(env->mem_io_pc);
2588
                if (!tb) {
2589
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2590
                              "pc=%p", (void *)env->mem_io_pc);
2591
                }
2592
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2593
                tb_phys_invalidate(tb, -1);
2594
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2595
                    env->exception_index = EXCP_DEBUG;
2596
                } else {
2597
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2598
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2599
                }
2600
                cpu_resume_from_signal(env, NULL);
2601
            }
2602
        } else {
2603
            wp->flags &= ~BP_WATCHPOINT_HIT;
2604
        }
2605
    }
2606
}
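
/* Illustrative sketch (hypothetical debugger front end): watchpoints that
   end up here are armed elsewhere with cpu_watchpoint_insert(), e.g.

       cpu_watchpoint_insert(env, guest_addr, 4, BP_MEM_WRITE | BP_GDB, NULL);

   which forces the containing page through the TLB_MMIO path so that the
   watch_mem_* handlers below can call check_watchpoint() on each access. */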
2607

    
2608
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2609
   so these check for a hit then pass through to the normal out-of-line
2610
   phys routines.  */
2611
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2612
{
2613
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2614
    return ldub_phys(addr);
2615
}
2616

    
2617
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2618
{
2619
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2620
    return lduw_phys(addr);
2621
}
2622

    
2623
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2624
{
2625
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2626
    return ldl_phys(addr);
2627
}
2628

    
2629
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2630
                             uint32_t val)
2631
{
2632
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2633
    stb_phys(addr, val);
2634
}
2635

    
2636
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2637
                             uint32_t val)
2638
{
2639
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2640
    stw_phys(addr, val);
2641
}
2642

    
2643
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2644
                             uint32_t val)
2645
{
2646
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2647
    stl_phys(addr, val);
2648
}
2649

    
2650
static CPUReadMemoryFunc *watch_mem_read[3] = {
2651
    watch_mem_readb,
2652
    watch_mem_readw,
2653
    watch_mem_readl,
2654
};
2655

    
2656
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2657
    watch_mem_writeb,
2658
    watch_mem_writew,
2659
    watch_mem_writel,
2660
};
2661

    
2662
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2663
                                 unsigned int len)
2664
{
2665
    uint32_t ret;
2666
    unsigned int idx;
2667

    
2668
    idx = SUBPAGE_IDX(addr);
2669
#if defined(DEBUG_SUBPAGE)
2670
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2671
           mmio, len, addr, idx);
2672
#endif
2673
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2674
                                       addr + mmio->region_offset[idx][0][len]);
2675

    
2676
    return ret;
2677
}
2678

    
2679
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2680
                              uint32_t value, unsigned int len)
2681
{
2682
    unsigned int idx;
2683

    
2684
    idx = SUBPAGE_IDX(addr);
2685
#if defined(DEBUG_SUBPAGE)
2686
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2687
           mmio, len, addr, idx, value);
2688
#endif
2689
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2690
                                  addr + mmio->region_offset[idx][1][len],
2691
                                  value);
2692
}
2693

    
2694
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2695
{
2696
#if defined(DEBUG_SUBPAGE)
2697
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2698
#endif
2699

    
2700
    return subpage_readlen(opaque, addr, 0);
2701
}
2702

    
2703
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2704
                            uint32_t value)
2705
{
2706
#if defined(DEBUG_SUBPAGE)
2707
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2708
#endif
2709
    subpage_writelen(opaque, addr, value, 0);
2710
}
2711

    
2712
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2713
{
2714
#if defined(DEBUG_SUBPAGE)
2715
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2716
#endif
2717

    
2718
    return subpage_readlen(opaque, addr, 1);
2719
}
2720

    
2721
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2722
                            uint32_t value)
2723
{
2724
#if defined(DEBUG_SUBPAGE)
2725
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2726
#endif
2727
    subpage_writelen(opaque, addr, value, 1);
2728
}
2729

    
2730
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2731
{
2732
#if defined(DEBUG_SUBPAGE)
2733
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2734
#endif
2735

    
2736
    return subpage_readlen(opaque, addr, 2);
2737
}
2738

    
2739
static void subpage_writel (void *opaque,
2740
                         target_phys_addr_t addr, uint32_t value)
2741
{
2742
#if defined(DEBUG_SUBPAGE)
2743
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2744
#endif
2745
    subpage_writelen(opaque, addr, value, 2);
2746
}
2747

    
2748
static CPUReadMemoryFunc *subpage_read[] = {
2749
    &subpage_readb,
2750
    &subpage_readw,
2751
    &subpage_readl,
2752
};
2753

    
2754
static CPUWriteMemoryFunc *subpage_write[] = {
2755
    &subpage_writeb,
2756
    &subpage_writew,
2757
    &subpage_writel,
2758
};
2759

    
2760
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2761
                             ram_addr_t memory, ram_addr_t region_offset)
2762
{
2763
    int idx, eidx;
2764
    unsigned int i;
2765

    
2766
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2767
        return -1;
2768
    idx = SUBPAGE_IDX(start);
2769
    eidx = SUBPAGE_IDX(end);
2770
#if defined(DEBUG_SUBPAGE)
2771
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2772
           mmio, start, end, idx, eidx, memory);
2773
#endif
2774
    memory >>= IO_MEM_SHIFT;
2775
    for (; idx <= eidx; idx++) {
2776
        for (i = 0; i < 4; i++) {
2777
            if (io_mem_read[memory][i]) {
2778
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2779
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2780
                mmio->region_offset[idx][0][i] = region_offset;
2781
            }
2782
            if (io_mem_write[memory][i]) {
2783
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2784
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2785
                mmio->region_offset[idx][1][i] = region_offset;
2786
            }
2787
        }
2788
    }
2789

    
2790
    return 0;
2791
}
2792

    
2793
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2794
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2795
{
2796
    subpage_t *mmio;
2797
    int subpage_memory;
2798

    
2799
    mmio = qemu_mallocz(sizeof(subpage_t));
2800

    
2801
    mmio->base = base;
2802
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2803
#if defined(DEBUG_SUBPAGE)
2804
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2805
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2806
#endif
2807
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2808
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2809
                         region_offset);
2810

    
2811
    return mmio;
2812
}
2813

    
2814
static int get_free_io_mem_idx(void)
2815
{
2816
    int i;
2817

    
2818
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2819
        if (!io_mem_used[i]) {
2820
            io_mem_used[i] = 1;
2821
            return i;
2822
        }
2823

    
2824
    return -1;
2825
}
2826

    
2827
static void io_mem_init(void)
2828
{
2829
    int i;
2830

    
2831
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2832
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2833
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2834
    for (i=0; i<5; i++)
2835
        io_mem_used[i] = 1;
2836

    
2837
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2838
                                          watch_mem_write, NULL);
2839
    /* alloc dirty bits array */
2840
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2841
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2842
}
2843

    
2844
/* mem_read and mem_write are arrays of functions containing the
2845
   function to access byte (index 0), word (index 1) and dword (index
2846
   2). Functions can be omitted with a NULL function pointer. The
2847
   registered functions may be modified dynamically later.
2848
   If io_index is non-zero, the corresponding io zone is
2849
   modified. If it is zero, a new io zone is allocated. The return
2850
   value can be used with cpu_register_physical_memory(). (-1) is
2851
   returned on error. */
2852
int cpu_register_io_memory(int io_index,
2853
                           CPUReadMemoryFunc **mem_read,
2854
                           CPUWriteMemoryFunc **mem_write,
2855
                           void *opaque)
2856
{
2857
    int i, subwidth = 0;
2858

    
2859
    if (io_index <= 0) {
2860
        io_index = get_free_io_mem_idx();
2861
        if (io_index == -1)
2862
            return io_index;
2863
    } else {
2864
        if (io_index >= IO_MEM_NB_ENTRIES)
2865
            return -1;
2866
    }
2867

    
2868
    for(i = 0;i < 3; i++) {
2869
        if (!mem_read[i] || !mem_write[i])
2870
            subwidth = IO_MEM_SUBWIDTH;
2871
        io_mem_read[io_index][i] = mem_read[i];
2872
        io_mem_write[io_index][i] = mem_write[i];
2873
    }
2874
    io_mem_opaque[io_index] = opaque;
2875
    return (io_index << IO_MEM_SHIFT) | subwidth;
2876
}
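
/* Illustrative sketch (hypothetical device model, not from this file): a
   device typically provides three read and three write callbacks and
   registers them like this:

       static CPUReadMemoryFunc *dev_read[3] = {
           dev_readb, dev_readw, dev_readl,
       };
       static CPUWriteMemoryFunc *dev_write[3] = {
           dev_writeb, dev_writew, dev_writel,
       };

       int io = cpu_register_io_memory(0, dev_read, dev_write, dev_state);
       cpu_register_physical_memory(base, 0x1000, io);

   dev_* and base are placeholders; passing 0 as io_index allocates a
   fresh slot as described above. */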
2877

    
2878
void cpu_unregister_io_memory(int io_table_address)
2879
{
2880
    int i;
2881
    int io_index = io_table_address >> IO_MEM_SHIFT;
2882

    
2883
    for (i=0;i < 3; i++) {
2884
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2885
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2886
    }
2887
    io_mem_opaque[io_index] = NULL;
2888
    io_mem_used[io_index] = 0;
2889
}
2890

    
2891
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2892
{
2893
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2894
}
2895

    
2896
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2897
{
2898
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2899
}
2900

    
2901
#endif /* !defined(CONFIG_USER_ONLY) */
2902

    
2903
/* physical memory access (slow version, mainly for debug) */
2904
#if defined(CONFIG_USER_ONLY)
2905
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2906
                            int len, int is_write)
2907
{
2908
    int l, flags;
2909
    target_ulong page;
2910
    void * p;
2911

    
2912
    while (len > 0) {
2913
        page = addr & TARGET_PAGE_MASK;
2914
        l = (page + TARGET_PAGE_SIZE) - addr;
2915
        if (l > len)
2916
            l = len;
2917
        flags = page_get_flags(page);
2918
        if (!(flags & PAGE_VALID))
2919
            return;
2920
        if (is_write) {
2921
            if (!(flags & PAGE_WRITE))
2922
                return;
2923
            /* XXX: this code should not depend on lock_user */
2924
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2925
                /* FIXME - should this return an error rather than just fail? */
2926
                return;
2927
            memcpy(p, buf, l);
2928
            unlock_user(p, addr, l);
2929
        } else {
2930
            if (!(flags & PAGE_READ))
2931
                return;
2932
            /* XXX: this code should not depend on lock_user */
2933
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2934
                /* FIXME - should this return an error rather than just fail? */
2935
                return;
2936
            memcpy(buf, p, l);
2937
            unlock_user(p, addr, 0);
2938
        }
2939
        len -= l;
2940
        buf += l;
2941
        addr += l;
2942
    }
2943
}
2944

    
2945
#else
2946
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2947
                            int len, int is_write)
2948
{
2949
    int l, io_index;
2950
    uint8_t *ptr;
2951
    uint32_t val;
2952
    target_phys_addr_t page;
2953
    unsigned long pd;
2954
    PhysPageDesc *p;
2955

    
2956
    while (len > 0) {
2957
        page = addr & TARGET_PAGE_MASK;
2958
        l = (page + TARGET_PAGE_SIZE) - addr;
2959
        if (l > len)
2960
            l = len;
2961
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2962
        if (!p) {
2963
            pd = IO_MEM_UNASSIGNED;
2964
        } else {
2965
            pd = p->phys_offset;
2966
        }
2967

    
2968
        if (is_write) {
2969
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2970
                target_phys_addr_t addr1 = addr;
2971
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2972
                if (p)
2973
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2974
                /* XXX: could force cpu_single_env to NULL to avoid
2975
                   potential bugs */
2976
                if (l >= 4 && ((addr1 & 3) == 0)) {
2977
                    /* 32 bit write access */
2978
                    val = ldl_p(buf);
2979
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
2980
                    l = 4;
2981
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
2982
                    /* 16 bit write access */
2983
                    val = lduw_p(buf);
2984
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
2985
                    l = 2;
2986
                } else {
2987
                    /* 8 bit write access */
2988
                    val = ldub_p(buf);
2989
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
2990
                    l = 1;
2991
                }
2992
            } else {
2993
                unsigned long addr1;
2994
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2995
                /* RAM case */
2996
                ptr = phys_ram_base + addr1;
2997
                memcpy(ptr, buf, l);
2998
                if (!cpu_physical_memory_is_dirty(addr1)) {
2999
                    /* invalidate code */
3000
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3001
                    /* set dirty bit */
3002
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3003
                        (0xff & ~CODE_DIRTY_FLAG);
3004
                }
3005
            }
3006
        } else {
3007
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3008
                !(pd & IO_MEM_ROMD)) {
3009
                target_phys_addr_t addr1 = addr;
3010
                /* I/O case */
3011
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3012
                if (p)
3013
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3014
                if (l >= 4 && ((addr1 & 3) == 0)) {
3015
                    /* 32 bit read access */
3016
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3017
                    stl_p(buf, val);
3018
                    l = 4;
3019
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3020
                    /* 16 bit read access */
3021
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3022
                    stw_p(buf, val);
3023
                    l = 2;
3024
                } else {
3025
                    /* 8 bit read access */
3026
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3027
                    stb_p(buf, val);
3028
                    l = 1;
3029
                }
3030
            } else {
3031
                /* RAM case */
3032
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3033
                    (addr & ~TARGET_PAGE_MASK);
3034
                memcpy(buf, ptr, l);
3035
            }
3036
        }
3037
        len -= l;
3038
        buf += l;
3039
        addr += l;
3040
    }
3041
}
3042

    
3043
/* used for ROM loading: can write to RAM and ROM */
3044
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3045
                                   const uint8_t *buf, int len)
3046
{
3047
    int l;
3048
    uint8_t *ptr;
3049
    target_phys_addr_t page;
3050
    unsigned long pd;
3051
    PhysPageDesc *p;
3052

    
3053
    while (len > 0) {
3054
        page = addr & TARGET_PAGE_MASK;
3055
        l = (page + TARGET_PAGE_SIZE) - addr;
3056
        if (l > len)
3057
            l = len;
3058
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3059
        if (!p) {
3060
            pd = IO_MEM_UNASSIGNED;
3061
        } else {
3062
            pd = p->phys_offset;
3063
        }
3064

    
3065
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3066
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3067
            !(pd & IO_MEM_ROMD)) {
3068
            /* do nothing */
3069
        } else {
3070
            unsigned long addr1;
3071
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3072
            /* ROM/RAM case */
3073
            ptr = phys_ram_base + addr1;
3074
            memcpy(ptr, buf, l);
3075
        }
3076
        len -= l;
3077
        buf += l;
3078
        addr += l;
3079
    }
3080
}
3081

    
3082
typedef struct {
3083
    void *buffer;
3084
    target_phys_addr_t addr;
3085
    target_phys_addr_t len;
3086
} BounceBuffer;
3087

    
3088
static BounceBuffer bounce;
3089

    
3090
typedef struct MapClient {
3091
    void *opaque;
3092
    void (*callback)(void *opaque);
3093
    LIST_ENTRY(MapClient) link;
3094
} MapClient;
3095

    
3096
static LIST_HEAD(map_client_list, MapClient) map_client_list
3097
    = LIST_HEAD_INITIALIZER(map_client_list);
3098

    
3099
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3100
{
3101
    MapClient *client = qemu_malloc(sizeof(*client));
3102

    
3103
    client->opaque = opaque;
3104
    client->callback = callback;
3105
    LIST_INSERT_HEAD(&map_client_list, client, link);
3106
    return client;
3107
}
3108

    
3109
void cpu_unregister_map_client(void *_client)
3110
{
3111
    MapClient *client = (MapClient *)_client;
3112

    
3113
    LIST_REMOVE(client, link);
3114
}
3115

    
3116
static void cpu_notify_map_clients(void)
3117
{
3118
    MapClient *client;
3119

    
3120
    while (!LIST_EMPTY(&map_client_list)) {
3121
        client = LIST_FIRST(&map_client_list);
3122
        client->callback(client->opaque);
3123
        LIST_REMOVE(client, link);
3124
    }
3125
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
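
/* Typical map/unmap pairing (a sketch; every name except the two functions
 * above is hypothetical): map a guest-physical buffer for zero-copy access,
 * touch it, then unmap with the number of bytes actually accessed so the
 * dirty bitmap and TB invalidation stay consistent.
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(guest_addr, &plen, 1);
 *     if (!host) {
 *         // resources exhausted: register a map client and retry later
 *     } else {
 *         memset(host, 0, plen);                       // example write
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     }
 */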

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
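
/* Illustrative use of the ld*_phys() helpers (the descriptor layout and
 * variable names are hypothetical): device models commonly walk guest data
 * structures in physical memory with them, e.g.
 *
 *     uint64_t base = ldq_phys(desc_addr);      // 64-bit field at offset 0
 *     uint32_t len  = ldl_phys(desc_addr + 8);  // 32-bit field at offset 8
 *
 * As the warnings above note, ldl_phys()/ldq_phys() require naturally
 * aligned addresses.
 */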

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
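
/* Illustrative use (flag and variable names are hypothetical): an MMU
 * emulation that updates accessed/dirty bits in guest page tables can use
 * stl_phys_notdirty() so the write does not itself dirty the RAM page,
 * keeping the dirty bitmap usable for tracking guest PTE modifications:
 *
 *     pte |= PTE_ACCESSED_FLAG;           // hypothetical flag
 *     stl_phys_notdirty(pte_addr, pte);
 */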

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
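
/* Note on byte order: the unoptimized byte/word/quad helpers above funnel
 * through cpu_physical_memory_rw() and use tswap16()/tswap64() where a swap
 * is needed, so values are read and written in guest byte order, matching
 * the ldl_p()/stl_p() fast paths used by the aligned variants. */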

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
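
/* cpu_memory_rw_debug() is the accessor used by debug paths such as the gdb
 * stub and monitor memory dumps: each guest-virtual page is translated with
 * cpu_get_phys_page_debug() and the data is then copied through
 * cpu_physical_memory_rw(), without faulting into the guest. */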

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
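
/* dump_exec_info() is normally reached from the monitor (e.g. the
 * "info jit" command), with cpu_fprintf pointing at the monitor's
 * printf-style output helper; the per-TB averages above guard against
 * nb_tbs == 0. */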

#if !defined(CONFIG_USER_ONLY)
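
/* With SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu defined, the template
 * inclusions below instantiate the code-fetch variants of the softmmu
 * slow-path load helpers for 8/16/32/64-bit accesses (SHIFT 0..3).
 * GETPC() is NULL and env is aliased to cpu_single_env because these
 * helpers can be called outside generated code. */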

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif