/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
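
/* Illustrative note (editorial, assuming the common 4 KiB target pages, i.e.
   TARGET_PAGE_BITS == 12): with L2_BITS == 10 a 32-bit address splits into a
   10-bit L1 index, a 10-bit L2 index and a 12-bit page offset. The lookup
   helpers below work on the page index (addr >> TARGET_PAGE_BITS), so
       l1 = page_index >> L2_BITS;          (selects the l1_map[] slot)
       l2 = page_index & (L2_SIZE - 1);     (selects the PageDesc in that slot)
   which is exactly what page_l1_map()/page_find() compute. */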

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
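
/* Example (illustrative only, not part of the original source): with a 4 KiB
   host page size, map_exec(addr = 0x40001234, size = 0x100) in the POSIX
   variant rounds the range out to whole pages and ends up calling
       mprotect((void *)0x40001000, 0x1000, PROT_READ | PROT_WRITE | PROT_EXEC);
   i.e. the entire host page containing the buffer becomes executable. */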

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
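
/* Usage sketch (illustrative, not taken from the original source): callers
   pass a physical page index, i.e. the physical address shifted right by
   TARGET_PAGE_BITS, for example:
       PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
       if (!p)
           ... page is unassigned, treat it as IO_MEM_UNASSIGNED ...
   phys_page_find() never allocates; phys_page_find_alloc(index, 1) does. */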

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
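
/* Sizing note (illustrative, exact constants depend on the build): with the
   32 MB DEFAULT_CODE_GEN_BUFFER_SIZE and a CODE_GEN_AVG_BLOCK_SIZE of, say,
   128 bytes, code_gen_max_blocks comes out to 32 MB / 128 = 262144 TB slots.
   code_gen_buffer_max_size is kept one maximum-sized block short of the end
   of the buffer, so a block being translated can never overrun it before
   tb_alloc() refuses and forces a tb_flush(). */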

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    env->interrupt_request &= ~CPU_INTERRUPT_EXIT;
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
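
/* Note: besides explicit callers (breakpoints, single-stepping, etc.),
   tb_flush() is reached from tb_gen_code() below whenever tb_alloc() fails,
   i.e. when code_gen_max_blocks TBs exist or the generated code has grown
   past code_gen_buffer_max_size. Everything is re-translated lazily
   afterwards, which is why only the counters and caches are reset here. */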

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
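
/* Note on the pointer tagging used above: the TB lists store a small index in
   the two low bits of each TranslationBlock pointer, so (long)tb1 & 3 recovers
   it. Values 0 and 1 select slot 0 or 1 of page_next[]/jmp_next[] (first or
   second page, first or second outgoing jump), while the value 2 marks the
   head/end of the list, which is why the loops stop when n1 == 2. */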

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
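
/* Example (illustrative): set_bits(bitmap, 3, 7) marks bits 3..9, i.e. it ORs
   0xf8 into bitmap[0] (bits 3-7) and 0x03 into bitmap[1] (bits 8-9).
   build_page_bitmap() below relies on this to record which byte offsets of a
   page are covered by translated code. */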

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
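
/* Note: the code_gen_ptr expression above rounds the pointer up to the next
   multiple of CODE_GEN_ALIGN. For example, with an alignment of, say, 16 and
   code_gen_size == 100, a pointer at offset 0x1000 advances to 0x1070
   (100 rounded up to 112 bytes), so the next TB's generated code always
   starts on an aligned boundary. */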

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
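
/* Note on the TARGET_HAS_PRECISE_SMC path above: when the write comes from
   the currently executing TB, the tb_gen_code() call passes cflags == 1 (a
   one-instruction count), so the replacement block holds only the store
   instruction itself and therefore cannot invalidate itself again; execution
   then restarts from that block via cpu_resume_from_signal(). */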

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
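
/* Typical flow (for orientation; this describes the code above, it adds no
   new behaviour): tb_gen_code() calls tb_alloc(), translates into
   code_gen_buffer, then calls tb_link_phys(), which registers the TB in
   tb_phys_hash[] and in the PageDesc list of each physical page it touches;
   later writes to those pages funnel into tb_invalidate_phys_page_range()
   above, which undoes exactly this linking. */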

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
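
/* Note: the binary search works because tbs[] is filled in allocation order
   and code_gen_ptr only grows between flushes, so tc_ptr values increase with
   the index. A host PC that falls inside block m therefore satisfies
   tbs[m].tc_ptr <= tc_ptr < tbs[m+1].tc_ptr, which is the element returned
   via m_max when no exact match is found. */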

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
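
/* Example (illustrative): a 4-byte watchpoint at addr 0x1000 gets
   len_mask = ~(4 - 1) = ~3; the check (addr & ~len_mask) == (0x1000 & 3) == 0
   accepts it, while addr 0x1002 with len 4 is rejected as unaligned. */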

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    if (mask & CPU_INTERRUPT_EXIT) {
        env->exit_request = 1;
        mask &= ~CPU_INTERRUPT_EXIT;
    }

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
                for(item = cpu_log_items; item->mask != 0; item++) {
                        mask |= item->mask;
                }
        } else {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
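
/* Usage sketch (illustrative; roughly what command-line log selection does):
       int mask = cpu_str_to_log_mask("in_asm,exec");
       if (!mask)
           ... unknown item: print cpu_log_items[] as help ...
       cpu_set_log(mask);
   The special name "all" expands to every entry in cpu_log_items[]. */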
1621

    
1622
void cpu_abort(CPUState *env, const char *fmt, ...)
1623
{
1624
    va_list ap;
1625
    va_list ap2;
1626

    
1627
    va_start(ap, fmt);
1628
    va_copy(ap2, ap);
1629
    fprintf(stderr, "qemu: fatal: ");
1630
    vfprintf(stderr, fmt, ap);
1631
    fprintf(stderr, "\n");
1632
#ifdef TARGET_I386
1633
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1634
#else
1635
    cpu_dump_state(env, stderr, fprintf, 0);
1636
#endif
1637
    if (qemu_log_enabled()) {
1638
        qemu_log("qemu: fatal: ");
1639
        qemu_log_vprintf(fmt, ap2);
1640
        qemu_log("\n");
1641
#ifdef TARGET_I386
1642
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1643
#else
1644
        log_cpu_state(env, 0);
1645
#endif
1646
        qemu_log_flush();
1647
        qemu_log_close();
1648
    }
1649
    va_end(ap2);
1650
    va_end(ap);
1651
    abort();
1652
}
1653

    
1654
CPUState *cpu_copy(CPUState *env)
1655
{
1656
    CPUState *new_env = cpu_init(env->cpu_model_str);
1657
    CPUState *next_cpu = new_env->next_cpu;
1658
    int cpu_index = new_env->cpu_index;
1659
#if defined(TARGET_HAS_ICE)
1660
    CPUBreakpoint *bp;
1661
    CPUWatchpoint *wp;
1662
#endif
1663

    
1664
    memcpy(new_env, env, sizeof(CPUState));
1665

    
1666
    /* Preserve chaining and index. */
1667
    new_env->next_cpu = next_cpu;
1668
    new_env->cpu_index = cpu_index;
1669

    
1670
    /* Clone all break/watchpoints.
1671
       Note: Once we support ptrace with hw-debug register access, make sure
1672
       BP_CPU break/watchpoints are handled correctly on clone. */
1673
    /* Give the new CPU its own empty lists; the entries are cloned from
       env's lists below. */
    TAILQ_INIT(&new_env->breakpoints);
    TAILQ_INIT(&new_env->watchpoints);
1675
#if defined(TARGET_HAS_ICE)
1676
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1677
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1678
    }
1679
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1680
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1681
                              wp->flags, NULL);
1682
    }
1683
#endif
1684

    
1685
    return new_env;
1686
}
1687

    
1688
#if !defined(CONFIG_USER_ONLY)
1689

    
1690
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1691
{
1692
    unsigned int i;
1693

    
1694
    /* Discard jump cache entries for any tb which might potentially
1695
       overlap the flushed page.  */
1696
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1697
    memset (&env->tb_jmp_cache[i], 0, 
1698
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1699

    
1700
    i = tb_jmp_cache_hash_page(addr);
1701
    memset (&env->tb_jmp_cache[i], 0, 
1702
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1703
}
1704

    
1705
/* NOTE: if flush_global is true, also flush global entries (not
1706
   implemented yet) */
1707
void tlb_flush(CPUState *env, int flush_global)
1708
{
1709
    int i;
1710

    
1711
#if defined(DEBUG_TLB)
1712
    printf("tlb_flush:\n");
1713
#endif
1714
    /* must reset current TB so that interrupts cannot modify the
1715
       links while we are modifying them */
1716
    env->current_tb = NULL;
1717

    
1718
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1719
        env->tlb_table[0][i].addr_read = -1;
1720
        env->tlb_table[0][i].addr_write = -1;
1721
        env->tlb_table[0][i].addr_code = -1;
1722
        env->tlb_table[1][i].addr_read = -1;
1723
        env->tlb_table[1][i].addr_write = -1;
1724
        env->tlb_table[1][i].addr_code = -1;
1725
#if (NB_MMU_MODES >= 3)
1726
        env->tlb_table[2][i].addr_read = -1;
1727
        env->tlb_table[2][i].addr_write = -1;
1728
        env->tlb_table[2][i].addr_code = -1;
1729
#if (NB_MMU_MODES == 4)
1730
        env->tlb_table[3][i].addr_read = -1;
1731
        env->tlb_table[3][i].addr_write = -1;
1732
        env->tlb_table[3][i].addr_code = -1;
1733
#endif
1734
#endif
1735
    }
1736

    
1737
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1738

    
1739
#ifdef USE_KQEMU
1740
    if (env->kqemu_enabled) {
1741
        kqemu_flush(env, flush_global);
1742
    }
1743
#endif
1744
    tlb_flush_count++;
1745
}
1746

    
1747
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1748
{
1749
    if (addr == (tlb_entry->addr_read &
1750
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1751
        addr == (tlb_entry->addr_write &
1752
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1753
        addr == (tlb_entry->addr_code &
1754
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1755
        tlb_entry->addr_read = -1;
1756
        tlb_entry->addr_write = -1;
1757
        tlb_entry->addr_code = -1;
1758
    }
1759
}
1760

    
1761
void tlb_flush_page(CPUState *env, target_ulong addr)
1762
{
1763
    int i;
1764

    
1765
#if defined(DEBUG_TLB)
1766
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1767
#endif
1768
    /* must reset current TB so that interrupts cannot modify the
1769
       links while we are modifying them */
1770
    env->current_tb = NULL;
1771

    
1772
    addr &= TARGET_PAGE_MASK;
1773
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1774
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1775
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1776
#if (NB_MMU_MODES >= 3)
1777
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1778
#if (NB_MMU_MODES == 4)
1779
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1780
#endif
1781
#endif
1782

    
1783
    tlb_flush_jmp_cache(env, addr);
1784

    
1785
#ifdef USE_KQEMU
1786
    if (env->kqemu_enabled) {
1787
        kqemu_flush_page(env, addr);
1788
    }
1789
#endif
1790
}
1791

    
1792
/* update the TLBs so that writes to code in the virtual page 'addr'
1793
   can be detected */
1794
static void tlb_protect_code(ram_addr_t ram_addr)
1795
{
1796
    cpu_physical_memory_reset_dirty(ram_addr,
1797
                                    ram_addr + TARGET_PAGE_SIZE,
1798
                                    CODE_DIRTY_FLAG);
1799
}
1800

    
1801
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1802
   tested for self modifying code */
1803
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1804
                                    target_ulong vaddr)
1805
{
1806
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1807
}
1808

    
1809
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1810
                                         unsigned long start, unsigned long length)
1811
{
1812
    unsigned long addr;
1813
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1814
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1815
        if ((addr - start) < length) {
1816
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1817
        }
1818
    }
1819
}
1820

    
1821
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1822
                                     int dirty_flags)
1823
{
1824
    CPUState *env;
1825
    unsigned long length, start1;
1826
    int i, mask, len;
1827
    uint8_t *p;
1828

    
1829
    start &= TARGET_PAGE_MASK;
1830
    end = TARGET_PAGE_ALIGN(end);
1831

    
1832
    length = end - start;
1833
    if (length == 0)
1834
        return;
1835
    len = length >> TARGET_PAGE_BITS;
1836
#ifdef USE_KQEMU
1837
    /* XXX: should not depend on cpu context */
1838
    env = first_cpu;
1839
    if (env->kqemu_enabled) {
1840
        ram_addr_t addr;
1841
        addr = start;
1842
        for(i = 0; i < len; i++) {
1843
            kqemu_set_notdirty(env, addr);
1844
            addr += TARGET_PAGE_SIZE;
1845
        }
1846
    }
1847
#endif
1848
    mask = ~dirty_flags;
1849
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1850
    for(i = 0; i < len; i++)
1851
        p[i] &= mask;
1852

    
1853
    /* we modify the TLB cache so that the dirty bit will be set again
1854
       when accessing the range */
1855
    start1 = start + (unsigned long)phys_ram_base;
1856
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1857
        for(i = 0; i < CPU_TLB_SIZE; i++)
1858
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1859
        for(i = 0; i < CPU_TLB_SIZE; i++)
1860
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1861
#if (NB_MMU_MODES >= 3)
1862
        for(i = 0; i < CPU_TLB_SIZE; i++)
1863
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1864
#if (NB_MMU_MODES == 4)
1865
        for(i = 0; i < CPU_TLB_SIZE; i++)
1866
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1867
#endif
1868
#endif
1869
    }
1870
}
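
/* Illustrative sketch of how the dirty bitmap is consumed (the display
   code is assumed, not in this file): a VGA-style adapter scans the bits
   and then clears them for the range it has redrawn, e.g.

       for (addr = fb_start; addr < fb_end; addr += TARGET_PAGE_SIZE) {
           if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG))
               vga_redraw_page(addr);            // hypothetical helper
       }
       cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
*/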

    
1872
int cpu_physical_memory_set_dirty_tracking(int enable)
1873
{
1874
    in_migration = enable;
1875
    return 0;
1876
}
1877

    
1878
int cpu_physical_memory_get_dirty_tracking(void)
1879
{
1880
    return in_migration;
1881
}
1882

    
1883
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1884
{
1885
    if (kvm_enabled())
1886
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1887
}
1888

    
1889
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1890
{
1891
    ram_addr_t ram_addr;
1892

    
1893
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1894
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1895
            tlb_entry->addend - (unsigned long)phys_ram_base;
1896
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1897
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1898
        }
1899
    }
1900
}
1901

    
1902
/* update the TLB according to the current state of the dirty bits */
1903
void cpu_tlb_update_dirty(CPUState *env)
1904
{
1905
    int i;
1906
    for(i = 0; i < CPU_TLB_SIZE; i++)
1907
        tlb_update_dirty(&env->tlb_table[0][i]);
1908
    for(i = 0; i < CPU_TLB_SIZE; i++)
1909
        tlb_update_dirty(&env->tlb_table[1][i]);
1910
#if (NB_MMU_MODES >= 3)
1911
    for(i = 0; i < CPU_TLB_SIZE; i++)
1912
        tlb_update_dirty(&env->tlb_table[2][i]);
1913
#if (NB_MMU_MODES == 4)
1914
    for(i = 0; i < CPU_TLB_SIZE; i++)
1915
        tlb_update_dirty(&env->tlb_table[3][i]);
1916
#endif
1917
#endif
1918
}
1919

    
1920
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1921
{
1922
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1923
        tlb_entry->addr_write = vaddr;
1924
}
1925

    
1926
/* update the TLB corresponding to virtual page vaddr
1927
   so that it is no longer dirty */
1928
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1929
{
1930
    int i;
1931

    
1932
    vaddr &= TARGET_PAGE_MASK;
1933
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1934
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1935
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1936
#if (NB_MMU_MODES >= 3)
1937
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1938
#if (NB_MMU_MODES == 4)
1939
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1940
#endif
1941
#endif
1942
}
1943

    
1944
/* add a new TLB entry. At most one entry for a given virtual address
1945
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1946
   (can only happen in non SOFTMMU mode for I/O pages or pages
1947
   conflicting with the host address space). */
1948
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1949
                      target_phys_addr_t paddr, int prot,
1950
                      int mmu_idx, int is_softmmu)
1951
{
1952
    PhysPageDesc *p;
1953
    unsigned long pd;
1954
    unsigned int index;
1955
    target_ulong address;
1956
    target_ulong code_address;
1957
    target_phys_addr_t addend;
1958
    int ret;
1959
    CPUTLBEntry *te;
1960
    CPUWatchpoint *wp;
1961
    target_phys_addr_t iotlb;
1962

    
1963
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1964
    if (!p) {
1965
        pd = IO_MEM_UNASSIGNED;
1966
    } else {
1967
        pd = p->phys_offset;
1968
    }
1969
#if defined(DEBUG_TLB)
1970
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1971
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1972
#endif
1973

    
1974
    ret = 0;
1975
    address = vaddr;
1976
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1977
        /* IO memory case (romd handled later) */
1978
        address |= TLB_MMIO;
1979
    }
1980
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1981
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1982
        /* Normal RAM.  */
1983
        iotlb = pd & TARGET_PAGE_MASK;
1984
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1985
            iotlb |= IO_MEM_NOTDIRTY;
1986
        else
1987
            iotlb |= IO_MEM_ROM;
1988
    } else {
1989
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
1995
        iotlb = (pd & ~TARGET_PAGE_MASK);
1996
        if (p) {
1997
            iotlb += p->region_offset;
1998
        } else {
1999
            iotlb += paddr;
2000
        }
2001
    }
2002

    
2003
    code_address = address;
2004
    /* Make accesses to pages with watchpoints go via the
2005
       watchpoint trap routines.  */
2006
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2007
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2008
            iotlb = io_mem_watch + paddr;
2009
            /* TODO: The memory case can be optimized by not trapping
2010
               reads of pages with a write breakpoint.  */
2011
            address |= TLB_MMIO;
2012
        }
2013
    }
2014

    
2015
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2016
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2017
    te = &env->tlb_table[mmu_idx][index];
2018
    te->addend = addend - vaddr;
2019
    if (prot & PAGE_READ) {
2020
        te->addr_read = address;
2021
    } else {
2022
        te->addr_read = -1;
2023
    }
2024

    
2025
    if (prot & PAGE_EXEC) {
2026
        te->addr_code = code_address;
2027
    } else {
2028
        te->addr_code = -1;
2029
    }
2030
    if (prot & PAGE_WRITE) {
2031
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2032
            (pd & IO_MEM_ROMD)) {
2033
            /* Write access calls the I/O callback.  */
2034
            te->addr_write = address | TLB_MMIO;
2035
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2036
                   !cpu_physical_memory_is_dirty(pd)) {
2037
            te->addr_write = address | TLB_NOTDIRTY;
2038
        } else {
2039
            te->addr_write = address;
2040
        }
2041
    } else {
2042
        te->addr_write = -1;
2043
    }
2044
    return ret;
2045
}
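
/* Illustrative sketch (the caller is assumed, not in this file): each
   target's MMU fault handling code calls this after translating a guest
   virtual address, roughly

       tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                         paddr & TARGET_PAGE_MASK,
                         PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                         mmu_idx, 1);

   so the next access to that page hits the TLB instead of faulting. */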

    
2047
#else
2048

    
2049
void tlb_flush(CPUState *env, int flush_global)
2050
{
2051
}
2052

    
2053
void tlb_flush_page(CPUState *env, target_ulong addr)
2054
{
2055
}
2056

    
2057
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2058
                      target_phys_addr_t paddr, int prot,
2059
                      int mmu_idx, int is_softmmu)
2060
{
2061
    return 0;
2062
}
2063

    
2064
/* dump memory mappings */
2065
void page_dump(FILE *f)
2066
{
2067
    unsigned long start, end;
2068
    int i, j, prot, prot1;
2069
    PageDesc *p;
2070

    
2071
    fprintf(f, "%-8s %-8s %-8s %s\n",
2072
            "start", "end", "size", "prot");
2073
    start = -1;
2074
    end = -1;
2075
    prot = 0;
2076
    for(i = 0; i <= L1_SIZE; i++) {
2077
        if (i < L1_SIZE)
2078
            p = l1_map[i];
2079
        else
2080
            p = NULL;
2081
        for(j = 0;j < L2_SIZE; j++) {
2082
            if (!p)
2083
                prot1 = 0;
2084
            else
2085
                prot1 = p[j].flags;
2086
            if (prot1 != prot) {
2087
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2088
                if (start != -1) {
2089
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2090
                            start, end, end - start,
2091
                            prot & PAGE_READ ? 'r' : '-',
2092
                            prot & PAGE_WRITE ? 'w' : '-',
2093
                            prot & PAGE_EXEC ? 'x' : '-');
2094
                }
2095
                if (prot1 != 0)
2096
                    start = end;
2097
                else
2098
                    start = -1;
2099
                prot = prot1;
2100
            }
2101
            if (!p)
2102
                break;
2103
        }
2104
    }
2105
}
2106

    
2107
int page_get_flags(target_ulong address)
2108
{
2109
    PageDesc *p;
2110

    
2111
    p = page_find(address >> TARGET_PAGE_BITS);
2112
    if (!p)
2113
        return 0;
2114
    return p->flags;
2115
}
2116

    
2117
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
2120
void page_set_flags(target_ulong start, target_ulong end, int flags)
2121
{
2122
    PageDesc *p;
2123
    target_ulong addr;
2124

    
2125
    /* mmap_lock should already be held.  */
2126
    start = start & TARGET_PAGE_MASK;
2127
    end = TARGET_PAGE_ALIGN(end);
2128
    if (flags & PAGE_WRITE)
2129
        flags |= PAGE_WRITE_ORG;
2130
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2131
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2132
        /* We may be called for host regions that are outside guest
2133
           address space.  */
2134
        if (!p)
2135
            return;
2136
        /* if the write protection is set, then we invalidate the code
2137
           inside */
2138
        if (!(p->flags & PAGE_WRITE) &&
2139
            (flags & PAGE_WRITE) &&
2140
            p->first_tb) {
2141
            tb_invalidate_phys_page(addr, 0, NULL);
2142
        }
2143
        p->flags = flags;
2144
    }
2145
}
2146

    
2147
int page_check_range(target_ulong start, target_ulong len, int flags)
2148
{
2149
    PageDesc *p;
2150
    target_ulong end;
2151
    target_ulong addr;
2152

    
2153
    if (start + len < start)
2154
        /* we've wrapped around */
2155
        return -1;
2156

    
2157
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2158
    start = start & TARGET_PAGE_MASK;
2159

    
2160
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2161
        p = page_find(addr >> TARGET_PAGE_BITS);
2162
        if( !p )
2163
            return -1;
2164
        if( !(p->flags & PAGE_VALID) )
2165
            return -1;
2166

    
2167
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2168
            return -1;
2169
        if (flags & PAGE_WRITE) {
2170
            if (!(p->flags & PAGE_WRITE_ORG))
2171
                return -1;
2172
            /* unprotect the page if it was put read-only because it
2173
               contains translated code */
2174
            if (!(p->flags & PAGE_WRITE)) {
2175
                if (!page_unprotect(addr, 0, NULL))
2176
                    return -1;
2177
            }
2178
            return 0;
2179
        }
2180
    }
2181
    return 0;
2182
}
2183

    
2184
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2186
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2187
{
2188
    unsigned int page_index, prot, pindex;
2189
    PageDesc *p, *p1;
2190
    target_ulong host_start, host_end, addr;
2191

    
2192
    /* Technically this isn't safe inside a signal handler.  However we
2193
       know this only ever happens in a synchronous SEGV handler, so in
2194
       practice it seems to be ok.  */
2195
    mmap_lock();
2196

    
2197
    host_start = address & qemu_host_page_mask;
2198
    page_index = host_start >> TARGET_PAGE_BITS;
2199
    p1 = page_find(page_index);
2200
    if (!p1) {
2201
        mmap_unlock();
2202
        return 0;
2203
    }
2204
    host_end = host_start + qemu_host_page_size;
2205
    p = p1;
2206
    prot = 0;
2207
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2208
        prot |= p->flags;
2209
        p++;
2210
    }
2211
    /* if the page was really writable, then we change its
2212
       protection back to writable */
2213
    if (prot & PAGE_WRITE_ORG) {
2214
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2215
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2216
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2217
                     (prot & PAGE_BITS) | PAGE_WRITE);
2218
            p1[pindex].flags |= PAGE_WRITE;
2219
            /* and since the content will be modified, we must invalidate
2220
               the corresponding translated code. */
2221
            tb_invalidate_phys_page(address, pc, puc);
2222
#ifdef DEBUG_TB_CHECK
2223
            tb_invalidate_check(address);
2224
#endif
2225
            mmap_unlock();
2226
            return 1;
2227
        }
2228
    }
2229
    mmap_unlock();
2230
    return 0;
2231
}
2232

    
2233
static inline void tlb_set_dirty(CPUState *env,
2234
                                 unsigned long addr, target_ulong vaddr)
2235
{
2236
}
2237
#endif /* defined(CONFIG_USER_ONLY) */
2238

    
2239
#if !defined(CONFIG_USER_ONLY)
2240

    
2241
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2242
                             ram_addr_t memory, ram_addr_t region_offset);
2243
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2244
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2245
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2246
                      need_subpage)                                     \
2247
    do {                                                                \
2248
        if (addr > start_addr)                                          \
2249
            start_addr2 = 0;                                            \
2250
        else {                                                          \
2251
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2252
            if (start_addr2 > 0)                                        \
2253
                need_subpage = 1;                                       \
2254
        }                                                               \
2255
                                                                        \
2256
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2257
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2258
        else {                                                          \
2259
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2260
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2261
                need_subpage = 1;                                       \
2262
        }                                                               \
2263
    } while (0)
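
/* Worked example (illustrative; assumes TARGET_PAGE_SIZE == 0x1000):
   registering start_addr = 0x10000800 with orig_size = 0x400, at the loop
   iteration where addr == start_addr, gives
       start_addr2 = 0x800   (region starts mid-page -> need_subpage)
       end_addr2   = 0xbff   (region ends mid-page   -> need_subpage)
   so only the byte range [0x800, 0xbff] of that page is routed through
   the subpage handlers. */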

    
2265
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
2272
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2273
                                         ram_addr_t size,
2274
                                         ram_addr_t phys_offset,
2275
                                         ram_addr_t region_offset)
2276
{
2277
    target_phys_addr_t addr, end_addr;
2278
    PhysPageDesc *p;
2279
    CPUState *env;
2280
    ram_addr_t orig_size = size;
2281
    void *subpage;
2282

    
2283
#ifdef USE_KQEMU
2284
    /* XXX: should not depend on cpu context */
2285
    env = first_cpu;
2286
    if (env->kqemu_enabled) {
2287
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2288
    }
2289
#endif
2290
    if (kvm_enabled())
2291
        kvm_set_phys_mem(start_addr, size, phys_offset);
2292

    
2293
    if (phys_offset == IO_MEM_UNASSIGNED) {
2294
        region_offset = start_addr;
2295
    }
2296
    region_offset &= TARGET_PAGE_MASK;
2297
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2298
    end_addr = start_addr + (target_phys_addr_t)size;
2299
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2300
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2301
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2302
            ram_addr_t orig_memory = p->phys_offset;
2303
            target_phys_addr_t start_addr2, end_addr2;
2304
            int need_subpage = 0;
2305

    
2306
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2307
                          need_subpage);
2308
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2309
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2310
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2311
                                           &p->phys_offset, orig_memory,
2312
                                           p->region_offset);
2313
                } else {
2314
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2315
                                            >> IO_MEM_SHIFT];
2316
                }
2317
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2318
                                 region_offset);
2319
                p->region_offset = 0;
2320
            } else {
2321
                p->phys_offset = phys_offset;
2322
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2323
                    (phys_offset & IO_MEM_ROMD))
2324
                    phys_offset += TARGET_PAGE_SIZE;
2325
            }
2326
        } else {
2327
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2328
            p->phys_offset = phys_offset;
2329
            p->region_offset = region_offset;
2330
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2331
                (phys_offset & IO_MEM_ROMD)) {
2332
                phys_offset += TARGET_PAGE_SIZE;
2333
            } else {
2334
                target_phys_addr_t start_addr2, end_addr2;
2335
                int need_subpage = 0;
2336

    
2337
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2338
                              end_addr2, need_subpage);
2339

    
2340
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2341
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2342
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2343
                                           addr & TARGET_PAGE_MASK);
2344
                    subpage_register(subpage, start_addr2, end_addr2,
2345
                                     phys_offset, region_offset);
2346
                    p->region_offset = 0;
2347
                }
2348
            }
2349
        }
2350
        region_offset += TARGET_PAGE_SIZE;
2351
    }
2352

    
2353
    /* since each CPU stores ram addresses in its TLB cache, we must
2354
       reset the modified entries */
2355
    /* XXX: slow ! */
2356
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2357
        tlb_flush(env, 1);
2358
    }
2359
}
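
/* Illustrative usage sketch (board code is assumed, not in this file):
   RAM is typically allocated and then registered as

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

   while a device registers the token returned by cpu_register_io_memory()
   as its phys_offset, which makes the page an MMIO page. */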

    
2361
/* XXX: temporary until new memory mapping API */
2362
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2363
{
2364
    PhysPageDesc *p;
2365

    
2366
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2367
    if (!p)
2368
        return IO_MEM_UNASSIGNED;
2369
    return p->phys_offset;
2370
}
2371

    
2372
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2373
{
2374
    if (kvm_enabled())
2375
        kvm_coalesce_mmio_region(addr, size);
2376
}
2377

    
2378
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2379
{
2380
    if (kvm_enabled())
2381
        kvm_uncoalesce_mmio_region(addr, size);
2382
}
2383

    
2384
/* XXX: better than nothing */
2385
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2386
{
2387
    ram_addr_t addr;
2388
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2389
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2390
                (uint64_t)size, (uint64_t)phys_ram_size);
2391
        abort();
2392
    }
2393
    addr = phys_ram_alloc_offset;
2394
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2395
    return addr;
2396
}
2397

    
2398
void qemu_ram_free(ram_addr_t addr)
2399
{
2400
}
2401

    
2402
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2403
{
2404
#ifdef DEBUG_UNASSIGNED
2405
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2406
#endif
2407
#if defined(TARGET_SPARC)
2408
    do_unassigned_access(addr, 0, 0, 0, 1);
2409
#endif
2410
    return 0;
2411
}
2412

    
2413
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2414
{
2415
#ifdef DEBUG_UNASSIGNED
2416
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2417
#endif
2418
#if defined(TARGET_SPARC)
2419
    do_unassigned_access(addr, 0, 0, 0, 2);
2420
#endif
2421
    return 0;
2422
}
2423

    
2424
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2425
{
2426
#ifdef DEBUG_UNASSIGNED
2427
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2428
#endif
2429
#if defined(TARGET_SPARC)
2430
    do_unassigned_access(addr, 0, 0, 0, 4);
2431
#endif
2432
    return 0;
2433
}
2434

    
2435
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2436
{
2437
#ifdef DEBUG_UNASSIGNED
2438
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2439
#endif
2440
#if defined(TARGET_SPARC)
2441
    do_unassigned_access(addr, 1, 0, 0, 1);
2442
#endif
2443
}
2444

    
2445
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2446
{
2447
#ifdef DEBUG_UNASSIGNED
2448
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2449
#endif
2450
#if defined(TARGET_SPARC)
2451
    do_unassigned_access(addr, 1, 0, 0, 2);
2452
#endif
2453
}
2454

    
2455
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2456
{
2457
#ifdef DEBUG_UNASSIGNED
2458
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2459
#endif
2460
#if defined(TARGET_SPARC)
2461
    do_unassigned_access(addr, 1, 0, 0, 4);
2462
#endif
2463
}
2464

    
2465
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2466
    unassigned_mem_readb,
2467
    unassigned_mem_readw,
2468
    unassigned_mem_readl,
2469
};
2470

    
2471
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2472
    unassigned_mem_writeb,
2473
    unassigned_mem_writew,
2474
    unassigned_mem_writel,
2475
};
2476

    
2477
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2478
                                uint32_t val)
2479
{
2480
    int dirty_flags;
2481
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2482
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2483
#if !defined(CONFIG_USER_ONLY)
2484
        tb_invalidate_phys_page_fast(ram_addr, 1);
2485
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2486
#endif
2487
    }
2488
    stb_p(phys_ram_base + ram_addr, val);
2489
#ifdef USE_KQEMU
2490
    if (cpu_single_env->kqemu_enabled &&
2491
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2492
        kqemu_modify_page(cpu_single_env, ram_addr);
2493
#endif
2494
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2495
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2496
    /* we remove the notdirty callback only if the code has been
2497
       flushed */
2498
    if (dirty_flags == 0xff)
2499
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2500
}
2501

    
2502
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2503
                                uint32_t val)
2504
{
2505
    int dirty_flags;
2506
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2507
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2508
#if !defined(CONFIG_USER_ONLY)
2509
        tb_invalidate_phys_page_fast(ram_addr, 2);
2510
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2511
#endif
2512
    }
2513
    stw_p(phys_ram_base + ram_addr, val);
2514
#ifdef USE_KQEMU
2515
    if (cpu_single_env->kqemu_enabled &&
2516
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2517
        kqemu_modify_page(cpu_single_env, ram_addr);
2518
#endif
2519
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2520
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2521
    /* we remove the notdirty callback only if the code has been
2522
       flushed */
2523
    if (dirty_flags == 0xff)
2524
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2525
}
2526

    
2527
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2528
                                uint32_t val)
2529
{
2530
    int dirty_flags;
2531
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2532
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2533
#if !defined(CONFIG_USER_ONLY)
2534
        tb_invalidate_phys_page_fast(ram_addr, 4);
2535
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2536
#endif
2537
    }
2538
    stl_p(phys_ram_base + ram_addr, val);
2539
#ifdef USE_KQEMU
2540
    if (cpu_single_env->kqemu_enabled &&
2541
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2542
        kqemu_modify_page(cpu_single_env, ram_addr);
2543
#endif
2544
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2545
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2546
    /* we remove the notdirty callback only if the code has been
2547
       flushed */
2548
    if (dirty_flags == 0xff)
2549
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2550
}
2551

    
2552
static CPUReadMemoryFunc *error_mem_read[3] = {
2553
    NULL, /* never used */
2554
    NULL, /* never used */
2555
    NULL, /* never used */
2556
};
2557

    
2558
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2559
    notdirty_mem_writeb,
2560
    notdirty_mem_writew,
2561
    notdirty_mem_writel,
2562
};
2563

    
2564
/* Generate a debug exception if a watchpoint has been hit.  */
2565
static void check_watchpoint(int offset, int len_mask, int flags)
2566
{
2567
    CPUState *env = cpu_single_env;
2568
    target_ulong pc, cs_base;
2569
    TranslationBlock *tb;
2570
    target_ulong vaddr;
2571
    CPUWatchpoint *wp;
2572
    int cpu_flags;
2573

    
2574
    if (env->watchpoint_hit) {
2575
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2578
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2579
        return;
2580
    }
2581
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2582
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2583
        if ((vaddr == (wp->vaddr & len_mask) ||
2584
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2585
            wp->flags |= BP_WATCHPOINT_HIT;
2586
            if (!env->watchpoint_hit) {
2587
                env->watchpoint_hit = wp;
2588
                tb = tb_find_pc(env->mem_io_pc);
2589
                if (!tb) {
2590
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2591
                              "pc=%p", (void *)env->mem_io_pc);
2592
                }
2593
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2594
                tb_phys_invalidate(tb, -1);
2595
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2596
                    env->exception_index = EXCP_DEBUG;
2597
                } else {
2598
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2599
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2600
                }
2601
                cpu_resume_from_signal(env, NULL);
2602
            }
2603
        } else {
2604
            wp->flags &= ~BP_WATCHPOINT_HIT;
2605
        }
2606
    }
2607
}
2608

    
2609
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2610
   so these check for a hit then pass through to the normal out-of-line
2611
   phys routines.  */
2612
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2613
{
2614
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2615
    return ldub_phys(addr);
2616
}
2617

    
2618
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2619
{
2620
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2621
    return lduw_phys(addr);
2622
}
2623

    
2624
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2625
{
2626
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2627
    return ldl_phys(addr);
2628
}
2629

    
2630
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2631
                             uint32_t val)
2632
{
2633
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2634
    stb_phys(addr, val);
2635
}
2636

    
2637
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2638
                             uint32_t val)
2639
{
2640
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2641
    stw_phys(addr, val);
2642
}
2643

    
2644
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2645
                             uint32_t val)
2646
{
2647
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2648
    stl_phys(addr, val);
2649
}
2650

    
2651
static CPUReadMemoryFunc *watch_mem_read[3] = {
2652
    watch_mem_readb,
2653
    watch_mem_readw,
2654
    watch_mem_readl,
2655
};
2656

    
2657
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2658
    watch_mem_writeb,
2659
    watch_mem_writew,
2660
    watch_mem_writel,
2661
};
2662

    
2663
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2664
                                 unsigned int len)
2665
{
2666
    uint32_t ret;
2667
    unsigned int idx;
2668

    
2669
    idx = SUBPAGE_IDX(addr);
2670
#if defined(DEBUG_SUBPAGE)
2671
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2672
           mmio, len, addr, idx);
2673
#endif
2674
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2675
                                       addr + mmio->region_offset[idx][0][len]);
2676

    
2677
    return ret;
2678
}
2679

    
2680
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2681
                              uint32_t value, unsigned int len)
2682
{
2683
    unsigned int idx;
2684

    
2685
    idx = SUBPAGE_IDX(addr);
2686
#if defined(DEBUG_SUBPAGE)
2687
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2688
           mmio, len, addr, idx, value);
2689
#endif
2690
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2691
                                  addr + mmio->region_offset[idx][1][len],
2692
                                  value);
2693
}
2694

    
2695
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2696
{
2697
#if defined(DEBUG_SUBPAGE)
2698
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2699
#endif
2700

    
2701
    return subpage_readlen(opaque, addr, 0);
2702
}
2703

    
2704
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2705
                            uint32_t value)
2706
{
2707
#if defined(DEBUG_SUBPAGE)
2708
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2709
#endif
2710
    subpage_writelen(opaque, addr, value, 0);
2711
}
2712

    
2713
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2714
{
2715
#if defined(DEBUG_SUBPAGE)
2716
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2717
#endif
2718

    
2719
    return subpage_readlen(opaque, addr, 1);
2720
}
2721

    
2722
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2723
                            uint32_t value)
2724
{
2725
#if defined(DEBUG_SUBPAGE)
2726
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2727
#endif
2728
    subpage_writelen(opaque, addr, value, 1);
2729
}
2730

    
2731
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2732
{
2733
#if defined(DEBUG_SUBPAGE)
2734
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2735
#endif
2736

    
2737
    return subpage_readlen(opaque, addr, 2);
2738
}
2739

    
2740
static void subpage_writel (void *opaque,
2741
                         target_phys_addr_t addr, uint32_t value)
2742
{
2743
#if defined(DEBUG_SUBPAGE)
2744
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2745
#endif
2746
    subpage_writelen(opaque, addr, value, 2);
2747
}
2748

    
2749
static CPUReadMemoryFunc *subpage_read[] = {
2750
    &subpage_readb,
2751
    &subpage_readw,
2752
    &subpage_readl,
2753
};
2754

    
2755
static CPUWriteMemoryFunc *subpage_write[] = {
2756
    &subpage_writeb,
2757
    &subpage_writew,
2758
    &subpage_writel,
2759
};
2760

    
2761
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2762
                             ram_addr_t memory, ram_addr_t region_offset)
2763
{
2764
    int idx, eidx;
2765
    unsigned int i;
2766

    
2767
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2768
        return -1;
2769
    idx = SUBPAGE_IDX(start);
2770
    eidx = SUBPAGE_IDX(end);
2771
#if defined(DEBUG_SUBPAGE)
2772
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2773
           mmio, start, end, idx, eidx, memory);
2774
#endif
2775
    memory >>= IO_MEM_SHIFT;
2776
    for (; idx <= eidx; idx++) {
2777
        for (i = 0; i < 4; i++) {
2778
            if (io_mem_read[memory][i]) {
2779
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2780
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2781
                mmio->region_offset[idx][0][i] = region_offset;
2782
            }
2783
            if (io_mem_write[memory][i]) {
2784
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2785
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2786
                mmio->region_offset[idx][1][i] = region_offset;
2787
            }
2788
        }
2789
    }
2790

    
2791
    return 0;
2792
}
2793

    
2794
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2795
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2796
{
2797
    subpage_t *mmio;
2798
    int subpage_memory;
2799

    
2800
    mmio = qemu_mallocz(sizeof(subpage_t));
2801

    
2802
    mmio->base = base;
2803
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2804
#if defined(DEBUG_SUBPAGE)
2805
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2806
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2807
#endif
2808
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2809
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2810
                         region_offset);
2811

    
2812
    return mmio;
2813
}
2814

    
2815
static int get_free_io_mem_idx(void)
2816
{
2817
    int i;
2818

    
2819
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2820
        if (!io_mem_used[i]) {
2821
            io_mem_used[i] = 1;
2822
            return i;
2823
        }
2824

    
2825
    return -1;
2826
}
2827

    
2828
static void io_mem_init(void)
2829
{
2830
    int i;
2831

    
2832
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2833
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2834
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2835
    for (i=0; i<5; i++)
2836
        io_mem_used[i] = 1;
2837

    
2838
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2839
                                          watch_mem_write, NULL);
2840
    /* alloc dirty bits array */
2841
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2842
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2843
}
2844

    
2845
/* mem_read and mem_write are arrays of functions containing the
2846
   function to access byte (index 0), word (index 1) and dword (index
2847
   2). Functions can be omitted with a NULL function pointer. The
2848
   registered functions may be modified dynamically later.
2849
   If io_index is non zero, the corresponding io zone is
2850
   modified. If it is zero, a new io zone is allocated. The return
2851
   value can be used with cpu_register_physical_memory(). (-1) is
2852
   returned if error. */
2853
int cpu_register_io_memory(int io_index,
2854
                           CPUReadMemoryFunc **mem_read,
2855
                           CPUWriteMemoryFunc **mem_write,
2856
                           void *opaque)
2857
{
2858
    int i, subwidth = 0;
2859

    
2860
    if (io_index <= 0) {
2861
        io_index = get_free_io_mem_idx();
2862
        if (io_index == -1)
2863
            return io_index;
2864
    } else {
2865
        if (io_index >= IO_MEM_NB_ENTRIES)
2866
            return -1;
2867
    }
2868

    
2869
    for(i = 0;i < 3; i++) {
2870
        if (!mem_read[i] || !mem_write[i])
2871
            subwidth = IO_MEM_SUBWIDTH;
2872
        io_mem_read[io_index][i] = mem_read[i];
2873
        io_mem_write[io_index][i] = mem_write[i];
2874
    }
2875
    io_mem_opaque[io_index] = opaque;
2876
    return (io_index << IO_MEM_SHIFT) | subwidth;
2877
}
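
/* Illustrative sketch (hypothetical device, not in this file):

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   Passing io_index == 0 allocates a fresh slot; if any of the three access
   sizes is left NULL, the returned value carries IO_MEM_SUBWIDTH so the
   page gets subpage handling. */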

    
2879
void cpu_unregister_io_memory(int io_table_address)
2880
{
2881
    int i;
2882
    int io_index = io_table_address >> IO_MEM_SHIFT;
2883

    
2884
    for (i=0;i < 3; i++) {
2885
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2886
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2887
    }
2888
    io_mem_opaque[io_index] = NULL;
2889
    io_mem_used[io_index] = 0;
2890
}
2891

    
2892
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2893
{
2894
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2895
}
2896

    
2897
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2898
{
2899
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2900
}
2901

    
2902
#endif /* !defined(CONFIG_USER_ONLY) */
2903

    
2904
/* physical memory access (slow version, mainly for debug) */
2905
#if defined(CONFIG_USER_ONLY)
2906
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2907
                            int len, int is_write)
2908
{
2909
    int l, flags;
2910
    target_ulong page;
2911
    void * p;
2912

    
2913
    while (len > 0) {
2914
        page = addr & TARGET_PAGE_MASK;
2915
        l = (page + TARGET_PAGE_SIZE) - addr;
2916
        if (l > len)
2917
            l = len;
2918
        flags = page_get_flags(page);
2919
        if (!(flags & PAGE_VALID))
2920
            return;
2921
        if (is_write) {
2922
            if (!(flags & PAGE_WRITE))
2923
                return;
2924
            /* XXX: this code should not depend on lock_user */
2925
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2926
                /* FIXME - should this return an error rather than just fail? */
2927
                return;
2928
            memcpy(p, buf, l);
2929
            unlock_user(p, addr, l);
2930
        } else {
2931
            if (!(flags & PAGE_READ))
2932
                return;
2933
            /* XXX: this code should not depend on lock_user */
2934
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2935
                /* FIXME - should this return an error rather than just fail? */
2936
                return;
2937
            memcpy(buf, p, l);
2938
            unlock_user(p, addr, 0);
2939
        }
2940
        len -= l;
2941
        buf += l;
2942
        addr += l;
2943
    }
2944
}
2945

    
2946
#else
2947
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2948
                            int len, int is_write)
2949
{
2950
    int l, io_index;
2951
    uint8_t *ptr;
2952
    uint32_t val;
2953
    target_phys_addr_t page;
2954
    unsigned long pd;
2955
    PhysPageDesc *p;
2956

    
2957
    while (len > 0) {
2958
        page = addr & TARGET_PAGE_MASK;
2959
        l = (page + TARGET_PAGE_SIZE) - addr;
2960
        if (l > len)
2961
            l = len;
2962
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2963
        if (!p) {
2964
            pd = IO_MEM_UNASSIGNED;
2965
        } else {
2966
            pd = p->phys_offset;
2967
        }
2968

    
2969
        if (is_write) {
2970
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2971
                target_phys_addr_t addr1 = addr;
2972
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2973
                if (p)
2974
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2975
                /* XXX: could force cpu_single_env to NULL to avoid
2976
                   potential bugs */
2977
                if (l >= 4 && ((addr1 & 3) == 0)) {
2978
                    /* 32 bit write access */
2979
                    val = ldl_p(buf);
2980
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
2981
                    l = 4;
2982
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
2983
                    /* 16 bit write access */
2984
                    val = lduw_p(buf);
2985
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
2986
                    l = 2;
2987
                } else {
2988
                    /* 8 bit write access */
2989
                    val = ldub_p(buf);
2990
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
2991
                    l = 1;
2992
                }
2993
            } else {
2994
                unsigned long addr1;
2995
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2996
                /* RAM case */
2997
                ptr = phys_ram_base + addr1;
2998
                memcpy(ptr, buf, l);
2999
                if (!cpu_physical_memory_is_dirty(addr1)) {
3000
                    /* invalidate code */
3001
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3002
                    /* set dirty bit */
3003
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3004
                        (0xff & ~CODE_DIRTY_FLAG);
3005
                }
3006
            }
3007
        } else {
3008
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3009
                !(pd & IO_MEM_ROMD)) {
3010
                target_phys_addr_t addr1 = addr;
3011
                /* I/O case */
3012
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3013
                if (p)
3014
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3015
                if (l >= 4 && ((addr1 & 3) == 0)) {
3016
                    /* 32 bit read access */
3017
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3018
                    stl_p(buf, val);
3019
                    l = 4;
3020
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3021
                    /* 16 bit read access */
3022
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3023
                    stw_p(buf, val);
3024
                    l = 2;
3025
                } else {
3026
                    /* 8 bit read access */
3027
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3028
                    stb_p(buf, val);
3029
                    l = 1;
3030
                }
3031
            } else {
3032
                /* RAM case */
3033
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3034
                    (addr & ~TARGET_PAGE_MASK);
3035
                memcpy(buf, ptr, l);
3036
            }
3037
        }
3038
        len -= l;
3039
        buf += l;
3040
        addr += l;
3041
    }
3042
}
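
/* Illustrative sketch (assumed callers, not in this file): most device and
   loader code goes through the cpu_physical_memory_read()/
   cpu_physical_memory_write() wrappers, e.g.

       uint8_t desc[16];
       cpu_physical_memory_read(desc_addr, desc, sizeof(desc));

   which simply call cpu_physical_memory_rw() with is_write 0 or 1. */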

    
3044
/* used for ROM loading: can write in RAM and ROM */
3045
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3046
                                   const uint8_t *buf, int len)
3047
{
3048
    int l;
3049
    uint8_t *ptr;
3050
    target_phys_addr_t page;
3051
    unsigned long pd;
3052
    PhysPageDesc *p;
3053

    
3054
    while (len > 0) {
3055
        page = addr & TARGET_PAGE_MASK;
3056
        l = (page + TARGET_PAGE_SIZE) - addr;
3057
        if (l > len)
3058
            l = len;
3059
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3060
        if (!p) {
3061
            pd = IO_MEM_UNASSIGNED;
3062
        } else {
3063
            pd = p->phys_offset;
3064
        }
3065

    
3066
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3067
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3068
            !(pd & IO_MEM_ROMD)) {
3069
            /* do nothing */
3070
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
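
/* Editor's illustrative sketch (not part of the original source): one way a
 * device model might drive the map/unmap API above for a zero-copy DMA write
 * into guest memory, falling back to cpu_register_map_client() when
 * bounce-buffer resources are exhausted.  The names example_dma_retry() and
 * example_dma_write() are hypothetical. */
#if 0
static void example_dma_retry(void *opaque)
{
    /* Invoked from cpu_notify_map_clients() once a previous mapping (and its
       bounce buffer) has been released; the MapClient entry has already been
       removed from map_client_list by the notifier.  Re-issue the pending
       transfer here. */
}

static void example_dma_write(target_phys_addr_t guest_addr,
                              const uint8_t *data,
                              target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* No mapping possible right now (e.g. the single bounce buffer is
           busy); ask to be called back when a retry is likely to succeed.
           The returned handle could be kept and passed to
           cpu_unregister_map_client() if the transfer is cancelled. */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    /* The mapping may cover less than 'size'; only 'plen' bytes are valid. */
    memcpy(host, data, plen);
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif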

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}
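
/* Editor's illustrative sketch (not part of the original source): reading a
 * small guest-physical descriptor with the helpers above.  The descriptor
 * layout and 'desc_pa' are hypothetical; note that ldl_phys() and ldq_phys()
 * require naturally aligned addresses. */
#if 0
static void example_read_descriptor(target_phys_addr_t desc_pa)
{
    uint32_t status = ldl_phys(desc_pa);          /* 32-bit field at +0 */
    uint32_t length = lduw_phys(desc_pa + 4);     /* 16-bit field at +4 */
    uint64_t buffer = ldq_phys(desc_pa + 8);      /* 64-bit field at +8 */

    (void)status; (void)length; (void)buffer;
}
#endif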

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
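
/* Editor's illustrative sketch (not part of the original source): writing
 * 32-bit values into guest RAM.  stl_phys() keeps dirty tracking and TB
 * invalidation correct for ordinary data; stl_phys_notdirty() is the variant
 * suggested by its comment for updating PTE dirty/accessed bits without
 * marking the page dirty.  The address and value names are hypothetical. */
#if 0
static void example_store(target_phys_addr_t data_pa, target_phys_addr_t pte_pa,
                          uint32_t data_val, uint32_t pte_val)
{
    stl_phys(data_pa, data_val);         /* normal store, dirty bits updated */
    stl_phys_notdirty(pte_pa, pte_val);  /* PTE update, page not marked dirty */
}
#endif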

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
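
/* Editor's illustrative sketch (not part of the original source): how a
 * debugger front end (e.g. the gdb stub) could read guest-virtual memory
 * through the CPU's current page tables.  'env' is assumed to be a valid
 * CPUState; the buffer size is arbitrary. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[16];

    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
        return -1;   /* some page in the range is not mapped */
    }
    /* ... decode or display buf here ... */
    return 0;
}
#endif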

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
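
/* Editor's illustrative sketch (not part of the original source): the
 * function above only needs a FILE* and an fprintf-compatible callback, so a
 * quick dump to stderr could look like this; callers such as the QEMU
 * monitor would typically pass their own printf-like helper instead. */
#if 0
static void example_dump_to_stderr(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif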

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif