/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

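/* Mark a range of host memory as executable so that translated code can be
   run from it: VirtualProtect() on Win32, mprotect() (with the range rounded
   out to host page boundaries) elsewhere. */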
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

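/* Determine the host page size, derive the qemu_host_page_* globals from it,
   and allocate the physical page map.  In user-mode emulation, host mappings
   listed in /proc/self/maps are flagged PAGE_RESERVED so that the guest does
   not reuse them. */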
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

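/* The virtual page descriptor table is a two-level array: l1_map holds
   L1_SIZE pointers to lazily allocated blocks of L2_SIZE PageDesc entries;
   page_l1_map() returns the slot for the block covering 'index'. */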
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

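/* Look up the PhysPageDesc for a physical page index, allocating the
   intermediate table level(s) when 'alloc' is set.  Freshly allocated
   entries start out as IO_MEM_UNASSIGNED. */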
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

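/* Allocate the buffer that will hold the generated host code, honouring
   per-host placement constraints (so direct calls/branches can reach it),
   and size the TranslationBlock array from the average block size. */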
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

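/* Save/restore the target-independent part of the CPU state: the 'halted'
   flag and the pending interrupt_request mask. */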
static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

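/* A TB can intersect up to two host pages; the per-page TB lists are linked
   through tb->page_next[0/1], and the low two bits of each list pointer
   record which of the two entries the next link lives in. */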
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

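/* Set bits [start, start + len) in the byte-addressed bitmap 'tab'. */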
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

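/* Build a bitmap of the bytes of this page that are covered by translated
   code, so that self-modifying-code writes which miss every TB can be
   serviced without any invalidation. */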
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

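/* Translate a new TB for (pc, cs_base, flags); if no TB slot or code buffer
   space is left, flush the whole code cache and retry, then link the new TB
   into the physical page tables. */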
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

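/* Detach the outgoing jump 'n' of 'tb' from its target TB and recurse on
   that target, so that no chain of directly linked TBs can keep executing
   without returning to the main CPU loop. */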
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
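/* Invalidate any translated code containing 'pc' so that a newly inserted
   or removed breakpoint takes effect the next time the code is translated. */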
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

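/* Return true if the first 'n' characters of 's1' match the whole of 's2'. */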
static int cmp1(const char *s1, int n, const char *s2)
1592
{
1593
    if (strlen(s2) != n)
1594
        return 0;
1595
    return memcmp(s1, s2, n) == 0;
1596
}
1597

    
1598
/* takes a comma separated list of log masks. Return 0 if error. */
1599
int cpu_str_to_log_mask(const char *str)
1600
{
1601
    const CPULogItem *item;
1602
    int mask;
1603
    const char *p, *p1;
1604

    
1605
    p = str;
1606
    mask = 0;
1607
    for(;;) {
1608
        p1 = strchr(p, ',');
1609
        if (!p1)
1610
            p1 = p + strlen(p);
1611
        if(cmp1(p,p1-p,"all")) {
1612
                for(item = cpu_log_items; item->mask != 0; item++) {
1613
                        mask |= item->mask;
1614
                }
1615
        } else {
1616
        for(item = cpu_log_items; item->mask != 0; item++) {
1617
            if (cmp1(p, p1 - p, item->name))
1618
                goto found;
1619
        }
1620
        return 0;
1621
        }
1622
    found:
1623
        mask |= item->mask;
1624
        if (*p1 != ',')
1625
            break;
1626
        p = p1 + 1;
1627
    }
1628
    return mask;
1629
}
1630

    
1631
void cpu_abort(CPUState *env, const char *fmt, ...)
1632
{
1633
    va_list ap;
1634
    va_list ap2;
1635

    
1636
    va_start(ap, fmt);
1637
    va_copy(ap2, ap);
1638
    fprintf(stderr, "qemu: fatal: ");
1639
    vfprintf(stderr, fmt, ap);
1640
    fprintf(stderr, "\n");
1641
#ifdef TARGET_I386
1642
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1643
#else
1644
    cpu_dump_state(env, stderr, fprintf, 0);
1645
#endif
1646
    if (qemu_log_enabled()) {
1647
        qemu_log("qemu: fatal: ");
1648
        qemu_log_vprintf(fmt, ap2);
1649
        qemu_log("\n");
1650
#ifdef TARGET_I386
1651
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1652
#else
1653
        log_cpu_state(env, 0);
1654
#endif
1655
        qemu_log_flush();
1656
        qemu_log_close();
1657
    }
1658
    va_end(ap2);
1659
    va_end(ap);
1660
    abort();
1661
}
1662

    
1663
CPUState *cpu_copy(CPUState *env)
1664
{
1665
    CPUState *new_env = cpu_init(env->cpu_model_str);
1666
    CPUState *next_cpu = new_env->next_cpu;
1667
    int cpu_index = new_env->cpu_index;
1668
#if defined(TARGET_HAS_ICE)
1669
    CPUBreakpoint *bp;
1670
    CPUWatchpoint *wp;
1671
#endif
1672

    
1673
    memcpy(new_env, env, sizeof(CPUState));
1674

    
1675
    /* Preserve chaining and index. */
1676
    new_env->next_cpu = next_cpu;
1677
    new_env->cpu_index = cpu_index;
1678

    
1679
    /* Clone all break/watchpoints.
1680
       Note: Once we support ptrace with hw-debug register access, make sure
1681
       BP_CPU break/watchpoints are handled correctly on clone. */
1682
    TAILQ_INIT(&env->breakpoints);
1683
    TAILQ_INIT(&env->watchpoints);
1684
#if defined(TARGET_HAS_ICE)
1685
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1686
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1687
    }
1688
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1689
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1690
                              wp->flags, NULL);
1691
    }
1692
#endif
1693

    
1694
    return new_env;
1695
}
1696

    
1697
#if !defined(CONFIG_USER_ONLY)
1698

    
1699
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1700
{
1701
    unsigned int i;
1702

    
1703
    /* Discard jump cache entries for any tb which might potentially
1704
       overlap the flushed page.  */
1705
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1706
    memset (&env->tb_jmp_cache[i], 0, 
1707
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1708

    
1709
    i = tb_jmp_cache_hash_page(addr);
1710
    memset (&env->tb_jmp_cache[i], 0, 
1711
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1712
}
1713

    
1714
/* NOTE: if flush_global is true, also flush global entries (not
1715
   implemented yet) */
1716
void tlb_flush(CPUState *env, int flush_global)
1717
{
1718
    int i;
1719

    
1720
#if defined(DEBUG_TLB)
1721
    printf("tlb_flush:\n");
1722
#endif
1723
    /* must reset current TB so that interrupts cannot modify the
1724
       links while we are modifying them */
1725
    env->current_tb = NULL;
1726

    
1727
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1728
        env->tlb_table[0][i].addr_read = -1;
1729
        env->tlb_table[0][i].addr_write = -1;
1730
        env->tlb_table[0][i].addr_code = -1;
1731
        env->tlb_table[1][i].addr_read = -1;
1732
        env->tlb_table[1][i].addr_write = -1;
1733
        env->tlb_table[1][i].addr_code = -1;
1734
#if (NB_MMU_MODES >= 3)
1735
        env->tlb_table[2][i].addr_read = -1;
1736
        env->tlb_table[2][i].addr_write = -1;
1737
        env->tlb_table[2][i].addr_code = -1;
1738
#if (NB_MMU_MODES == 4)
1739
        env->tlb_table[3][i].addr_read = -1;
1740
        env->tlb_table[3][i].addr_write = -1;
1741
        env->tlb_table[3][i].addr_code = -1;
1742
#endif
1743
#endif
1744
    }
1745

    
1746
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1747

    
1748
#ifdef USE_KQEMU
1749
    if (env->kqemu_enabled) {
1750
        kqemu_flush(env, flush_global);
1751
    }
1752
#endif
1753
    tlb_flush_count++;
1754
}
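
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): a target MMU helper drops every cached translation after a change
   that can affect all mappings; the '1' asks for global entries too, which
   is accepted but not implemented yet, as noted above. */
#if 0
static void example_mmu_context_switch(CPUState *env)
{
    /* ... update the target's page-table base register here ... */
    tlb_flush(env, 1);
}
#endif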
1755

    
1756
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1757
{
1758
    if (addr == (tlb_entry->addr_read &
1759
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1760
        addr == (tlb_entry->addr_write &
1761
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1762
        addr == (tlb_entry->addr_code &
1763
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1764
        tlb_entry->addr_read = -1;
1765
        tlb_entry->addr_write = -1;
1766
        tlb_entry->addr_code = -1;
1767
    }
1768
}
1769

    
1770
void tlb_flush_page(CPUState *env, target_ulong addr)
1771
{
1772
    int i;
1773

    
1774
#if defined(DEBUG_TLB)
1775
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1776
#endif
1777
    /* must reset current TB so that interrupts cannot modify the
1778
       links while we are modifying them */
1779
    env->current_tb = NULL;
1780

    
1781
    addr &= TARGET_PAGE_MASK;
1782
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1783
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1784
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1785
#if (NB_MMU_MODES >= 3)
1786
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1787
#if (NB_MMU_MODES == 4)
1788
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1789
#endif
1790
#endif
1791

    
1792
    tlb_flush_jmp_cache(env, addr);
1793

    
1794
#ifdef USE_KQEMU
1795
    if (env->kqemu_enabled) {
1796
        kqemu_flush_page(env, addr);
1797
    }
1798
#endif
1799
}
1800

    
1801
/* update the TLBs so that writes to code in the virtual page 'addr'
1802
   can be detected */
1803
static void tlb_protect_code(ram_addr_t ram_addr)
1804
{
1805
    cpu_physical_memory_reset_dirty(ram_addr,
1806
                                    ram_addr + TARGET_PAGE_SIZE,
1807
                                    CODE_DIRTY_FLAG);
1808
}
1809

    
1810
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1811
   tested for self modifying code */
1812
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1813
                                    target_ulong vaddr)
1814
{
1815
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1816
}
1817

    
1818
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1819
                                         unsigned long start, unsigned long length)
1820
{
1821
    unsigned long addr;
1822
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1823
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1824
        if ((addr - start) < length) {
1825
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1826
        }
1827
    }
1828
}
1829

    
1830
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1831
                                     int dirty_flags)
1832
{
1833
    CPUState *env;
1834
    unsigned long length, start1;
1835
    int i, mask, len;
1836
    uint8_t *p;
1837

    
1838
    start &= TARGET_PAGE_MASK;
1839
    end = TARGET_PAGE_ALIGN(end);
1840

    
1841
    length = end - start;
1842
    if (length == 0)
1843
        return;
1844
    len = length >> TARGET_PAGE_BITS;
1845
#ifdef USE_KQEMU
1846
    /* XXX: should not depend on cpu context */
1847
    env = first_cpu;
1848
    if (env->kqemu_enabled) {
1849
        ram_addr_t addr;
1850
        addr = start;
1851
        for(i = 0; i < len; i++) {
1852
            kqemu_set_notdirty(env, addr);
1853
            addr += TARGET_PAGE_SIZE;
1854
        }
1855
    }
1856
#endif
1857
    mask = ~dirty_flags;
1858
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1859
    for(i = 0; i < len; i++)
1860
        p[i] &= mask;
1861

    
1862
    /* we modify the TLB cache so that the dirty bit will be set again
1863
       when accessing the range */
1864
    start1 = start + (unsigned long)phys_ram_base;
1865
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1866
        for(i = 0; i < CPU_TLB_SIZE; i++)
1867
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1868
        for(i = 0; i < CPU_TLB_SIZE; i++)
1869
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1870
#if (NB_MMU_MODES >= 3)
1871
        for(i = 0; i < CPU_TLB_SIZE; i++)
1872
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1873
#if (NB_MMU_MODES == 4)
1874
        for(i = 0; i < CPU_TLB_SIZE; i++)
1875
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1876
#endif
1877
#endif
1878
    }
1879
}
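
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): a framebuffer device redraws the pages it found dirty and then
   clears its private dirty flag for that range.  VGA_DIRTY_FLAG and
   cpu_physical_memory_get_dirty() are assumed to be the companion
   definitions from cpu-all.h. */
#if 0
    if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
        /* ... redraw this page ... */
        cpu_physical_memory_reset_dirty(page_addr,
                                        page_addr + TARGET_PAGE_SIZE,
                                        VGA_DIRTY_FLAG);
    }
#endif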
1880

    
1881
int cpu_physical_memory_set_dirty_tracking(int enable)
1882
{
1883
    in_migration = enable;
1884
    return 0;
1885
}
1886

    
1887
int cpu_physical_memory_get_dirty_tracking(void)
1888
{
1889
    return in_migration;
1890
}
1891

    
1892
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1893
{
1894
    if (kvm_enabled())
1895
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1896
}
1897

    
1898
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1899
{
1900
    ram_addr_t ram_addr;
1901

    
1902
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1903
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1904
            tlb_entry->addend - (unsigned long)phys_ram_base;
1905
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1906
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1907
        }
1908
    }
1909
}
1910

    
1911
/* update the TLB according to the current state of the dirty bits */
1912
void cpu_tlb_update_dirty(CPUState *env)
1913
{
1914
    int i;
1915
    for(i = 0; i < CPU_TLB_SIZE; i++)
1916
        tlb_update_dirty(&env->tlb_table[0][i]);
1917
    for(i = 0; i < CPU_TLB_SIZE; i++)
1918
        tlb_update_dirty(&env->tlb_table[1][i]);
1919
#if (NB_MMU_MODES >= 3)
1920
    for(i = 0; i < CPU_TLB_SIZE; i++)
1921
        tlb_update_dirty(&env->tlb_table[2][i]);
1922
#if (NB_MMU_MODES == 4)
1923
    for(i = 0; i < CPU_TLB_SIZE; i++)
1924
        tlb_update_dirty(&env->tlb_table[3][i]);
1925
#endif
1926
#endif
1927
}
1928

    
1929
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1930
{
1931
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1932
        tlb_entry->addr_write = vaddr;
1933
}
1934

    
1935
/* update the TLB corresponding to virtual page vaddr
1936
   so that it is no longer dirty */
1937
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1938
{
1939
    int i;
1940

    
1941
    vaddr &= TARGET_PAGE_MASK;
1942
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1943
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1944
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1945
#if (NB_MMU_MODES >= 3)
1946
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1947
#if (NB_MMU_MODES == 4)
1948
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1949
#endif
1950
#endif
1951
}
1952

    
1953
/* add a new TLB entry. At most one entry for a given virtual address
1954
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1955
   (can only happen in non SOFTMMU mode for I/O pages or pages
1956
   conflicting with the host address space). */
1957
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1958
                      target_phys_addr_t paddr, int prot,
1959
                      int mmu_idx, int is_softmmu)
1960
{
1961
    PhysPageDesc *p;
1962
    unsigned long pd;
1963
    unsigned int index;
1964
    target_ulong address;
1965
    target_ulong code_address;
1966
    target_phys_addr_t addend;
1967
    int ret;
1968
    CPUTLBEntry *te;
1969
    CPUWatchpoint *wp;
1970
    target_phys_addr_t iotlb;
1971

    
1972
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1973
    if (!p) {
1974
        pd = IO_MEM_UNASSIGNED;
1975
    } else {
1976
        pd = p->phys_offset;
1977
    }
1978
#if defined(DEBUG_TLB)
1979
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1980
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1981
#endif
1982

    
1983
    ret = 0;
1984
    address = vaddr;
1985
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1986
        /* IO memory case (romd handled later) */
1987
        address |= TLB_MMIO;
1988
    }
1989
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1990
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1991
        /* Normal RAM.  */
1992
        iotlb = pd & TARGET_PAGE_MASK;
1993
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1994
            iotlb |= IO_MEM_NOTDIRTY;
1995
        else
1996
            iotlb |= IO_MEM_ROM;
1997
    } else {
1998
        /* IO handlers are currently passed a physical address.
1999
           It would be nice to pass an offset from the base address
2000
           of that region.  This would avoid having to special case RAM,
2001
           and avoid full address decoding in every device.
2002
           We can't use the high bits of pd for this because
2003
           IO_MEM_ROMD uses these as a ram address.  */
2004
        iotlb = (pd & ~TARGET_PAGE_MASK);
2005
        if (p) {
2006
            iotlb += p->region_offset;
2007
        } else {
2008
            iotlb += paddr;
2009
        }
2010
    }
2011

    
2012
    code_address = address;
2013
    /* Make accesses to pages with watchpoints go via the
2014
       watchpoint trap routines.  */
2015
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2016
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2017
            iotlb = io_mem_watch + paddr;
2018
            /* TODO: The memory case can be optimized by not trapping
2019
               reads of pages with a write breakpoint.  */
2020
            address |= TLB_MMIO;
2021
        }
2022
    }
2023

    
2024
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2025
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2026
    te = &env->tlb_table[mmu_idx][index];
2027
    te->addend = addend - vaddr;
2028
    if (prot & PAGE_READ) {
2029
        te->addr_read = address;
2030
    } else {
2031
        te->addr_read = -1;
2032
    }
2033

    
2034
    if (prot & PAGE_EXEC) {
2035
        te->addr_code = code_address;
2036
    } else {
2037
        te->addr_code = -1;
2038
    }
2039
    if (prot & PAGE_WRITE) {
2040
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2041
            (pd & IO_MEM_ROMD)) {
2042
            /* Write access calls the I/O callback.  */
2043
            te->addr_write = address | TLB_MMIO;
2044
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2045
                   !cpu_physical_memory_is_dirty(pd)) {
2046
            te->addr_write = address | TLB_NOTDIRTY;
2047
        } else {
2048
            te->addr_write = address;
2049
        }
2050
    } else {
2051
        te->addr_write = -1;
2052
    }
2053
    return ret;
2054
}
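
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): after a TLB miss, a target's MMU code translates vaddr to a
   physical page and installs it.  get_physical_address() is a hypothetical
   per-target helper; real targets do this from their tlb_fill() /
   cpu_handle_mmu_fault() paths. */
#if 0
    target_phys_addr_t paddr;
    int prot;

    if (get_physical_address(env, &paddr, &prot, vaddr, mmu_idx) == 0) {
        tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                          paddr & TARGET_PAGE_MASK, prot, mmu_idx, 1);
    }
#endif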
2055

    
2056
#else
2057

    
2058
void tlb_flush(CPUState *env, int flush_global)
2059
{
2060
}
2061

    
2062
void tlb_flush_page(CPUState *env, target_ulong addr)
2063
{
2064
}
2065

    
2066
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2067
                      target_phys_addr_t paddr, int prot,
2068
                      int mmu_idx, int is_softmmu)
2069
{
2070
    return 0;
2071
}
2072

    
2073
/* dump memory mappings */
2074
void page_dump(FILE *f)
2075
{
2076
    unsigned long start, end;
2077
    int i, j, prot, prot1;
2078
    PageDesc *p;
2079

    
2080
    fprintf(f, "%-8s %-8s %-8s %s\n",
2081
            "start", "end", "size", "prot");
2082
    start = -1;
2083
    end = -1;
2084
    prot = 0;
2085
    for(i = 0; i <= L1_SIZE; i++) {
2086
        if (i < L1_SIZE)
2087
            p = l1_map[i];
2088
        else
2089
            p = NULL;
2090
        for(j = 0;j < L2_SIZE; j++) {
2091
            if (!p)
2092
                prot1 = 0;
2093
            else
2094
                prot1 = p[j].flags;
2095
            if (prot1 != prot) {
2096
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2097
                if (start != -1) {
2098
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2099
                            start, end, end - start,
2100
                            prot & PAGE_READ ? 'r' : '-',
2101
                            prot & PAGE_WRITE ? 'w' : '-',
2102
                            prot & PAGE_EXEC ? 'x' : '-');
2103
                }
2104
                if (prot1 != 0)
2105
                    start = end;
2106
                else
2107
                    start = -1;
2108
                prot = prot1;
2109
            }
2110
            if (!p)
2111
                break;
2112
        }
2113
    }
2114
}
2115

    
2116
int page_get_flags(target_ulong address)
2117
{
2118
    PageDesc *p;
2119

    
2120
    p = page_find(address >> TARGET_PAGE_BITS);
2121
    if (!p)
2122
        return 0;
2123
    return p->flags;
2124
}
2125

    
2126
/* modify the flags of a page and invalidate the code if
2127
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2128
   depending on PAGE_WRITE */
2129
void page_set_flags(target_ulong start, target_ulong end, int flags)
2130
{
2131
    PageDesc *p;
2132
    target_ulong addr;
2133

    
2134
    /* mmap_lock should already be held.  */
2135
    start = start & TARGET_PAGE_MASK;
2136
    end = TARGET_PAGE_ALIGN(end);
2137
    if (flags & PAGE_WRITE)
2138
        flags |= PAGE_WRITE_ORG;
2139
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2140
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2141
        /* We may be called for host regions that are outside guest
2142
           address space.  */
2143
        if (!p)
2144
            return;
2145
        /* if the write protection is set, then we invalidate the code
2146
           inside */
2147
        if (!(p->flags & PAGE_WRITE) &&
2148
            (flags & PAGE_WRITE) &&
2149
            p->first_tb) {
2150
            tb_invalidate_phys_page(addr, 0, NULL);
2151
        }
2152
        p->flags = flags;
2153
    }
2154
}
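
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): the user-mode mmap emulation marks a freshly mapped guest range as
   valid and readable/writable; PAGE_WRITE_ORG is added internally as
   described above.  'start' and 'len' are hypothetical. */
#if 0
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
#endif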
2155

    
2156
int page_check_range(target_ulong start, target_ulong len, int flags)
2157
{
2158
    PageDesc *p;
2159
    target_ulong end;
2160
    target_ulong addr;
2161

    
2162
    if (start + len < start)
2163
        /* we've wrapped around */
2164
        return -1;
2165

    
2166
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2167
    start = start & TARGET_PAGE_MASK;
2168

    
2169
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2170
        p = page_find(addr >> TARGET_PAGE_BITS);
2171
        if( !p )
2172
            return -1;
2173
        if( !(p->flags & PAGE_VALID) )
2174
            return -1;
2175

    
2176
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2177
            return -1;
2178
        if (flags & PAGE_WRITE) {
2179
            if (!(p->flags & PAGE_WRITE_ORG))
2180
                return -1;
2181
            /* unprotect the page if it was put read-only because it
2182
               contains translated code */
2183
            if (!(p->flags & PAGE_WRITE)) {
2184
                if (!page_unprotect(addr, 0, NULL))
2185
                    return -1;
2186
            }
2187
            return 0;
2188
        }
2189
    }
2190
    return 0;
2191
}
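
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): syscall emulation can validate a guest buffer before touching it; a
   non-zero return means the range is unmapped or lacks the requested
   access.  TARGET_EFAULT is the errno value used by the linux-user code. */
#if 0
    if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) != 0)
        return -TARGET_EFAULT;
#endif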
2192

    
2193
/* called from signal handler: invalidate the code and unprotect the
2194
   page. Return TRUE if the fault was successfully handled. */
2195
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2196
{
2197
    unsigned int page_index, prot, pindex;
2198
    PageDesc *p, *p1;
2199
    target_ulong host_start, host_end, addr;
2200

    
2201
    /* Technically this isn't safe inside a signal handler.  However we
2202
       know this only ever happens in a synchronous SEGV handler, so in
2203
       practice it seems to be ok.  */
2204
    mmap_lock();
2205

    
2206
    host_start = address & qemu_host_page_mask;
2207
    page_index = host_start >> TARGET_PAGE_BITS;
2208
    p1 = page_find(page_index);
2209
    if (!p1) {
2210
        mmap_unlock();
2211
        return 0;
2212
    }
2213
    host_end = host_start + qemu_host_page_size;
2214
    p = p1;
2215
    prot = 0;
2216
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2217
        prot |= p->flags;
2218
        p++;
2219
    }
2220
    /* if the page was really writable, then we change its
2221
       protection back to writable */
2222
    if (prot & PAGE_WRITE_ORG) {
2223
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2224
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2225
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2226
                     (prot & PAGE_BITS) | PAGE_WRITE);
2227
            p1[pindex].flags |= PAGE_WRITE;
2228
            /* and since the content will be modified, we must invalidate
2229
               the corresponding translated code. */
2230
            tb_invalidate_phys_page(address, pc, puc);
2231
#ifdef DEBUG_TB_CHECK
2232
            tb_invalidate_check(address);
2233
#endif
2234
            mmap_unlock();
2235
            return 1;
2236
        }
2237
    }
2238
    mmap_unlock();
2239
    return 0;
2240
}
2241

    
2242
static inline void tlb_set_dirty(CPUState *env,
2243
                                 unsigned long addr, target_ulong vaddr)
2244
{
2245
}
2246
#endif /* defined(CONFIG_USER_ONLY) */
2247

    
2248
#if !defined(CONFIG_USER_ONLY)
2249

    
2250
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2251
                             ram_addr_t memory, ram_addr_t region_offset);
2252
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2253
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2254
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2255
                      need_subpage)                                     \
2256
    do {                                                                \
2257
        if (addr > start_addr)                                          \
2258
            start_addr2 = 0;                                            \
2259
        else {                                                          \
2260
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2261
            if (start_addr2 > 0)                                        \
2262
                need_subpage = 1;                                       \
2263
        }                                                               \
2264
                                                                        \
2265
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2266
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2267
        else {                                                          \
2268
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2269
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2270
                need_subpage = 1;                                       \
2271
        }                                                               \
2272
    } while (0)
2273

    
2274
/* register physical memory. 'size' must be a multiple of the target
2275
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2276
   io memory page.  The address used when calling the IO function is
2277
   the offset from the start of the region, plus region_offset.  Both
2278
   start_region and regon_offset are rounded down to a page boundary
2279
   before calculating this offset.  This should not be a problem unless
2280
   the low bits of start_addr and region_offset differ.  */
2281
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2282
                                         ram_addr_t size,
2283
                                         ram_addr_t phys_offset,
2284
                                         ram_addr_t region_offset)
2285
{
2286
    target_phys_addr_t addr, end_addr;
2287
    PhysPageDesc *p;
2288
    CPUState *env;
2289
    ram_addr_t orig_size = size;
2290
    void *subpage;
2291

    
2292
#ifdef USE_KQEMU
2293
    /* XXX: should not depend on cpu context */
2294
    env = first_cpu;
2295
    if (env->kqemu_enabled) {
2296
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2297
    }
2298
#endif
2299
    if (kvm_enabled())
2300
        kvm_set_phys_mem(start_addr, size, phys_offset);
2301

    
2302
    if (phys_offset == IO_MEM_UNASSIGNED) {
2303
        region_offset = start_addr;
2304
    }
2305
    region_offset &= TARGET_PAGE_MASK;
2306
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2307
    end_addr = start_addr + (target_phys_addr_t)size;
2308
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2309
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2310
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2311
            ram_addr_t orig_memory = p->phys_offset;
2312
            target_phys_addr_t start_addr2, end_addr2;
2313
            int need_subpage = 0;
2314

    
2315
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2316
                          need_subpage);
2317
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2318
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2319
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2320
                                           &p->phys_offset, orig_memory,
2321
                                           p->region_offset);
2322
                } else {
2323
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2324
                                            >> IO_MEM_SHIFT];
2325
                }
2326
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2327
                                 region_offset);
2328
                p->region_offset = 0;
2329
            } else {
2330
                p->phys_offset = phys_offset;
2331
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2332
                    (phys_offset & IO_MEM_ROMD))
2333
                    phys_offset += TARGET_PAGE_SIZE;
2334
            }
2335
        } else {
2336
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2337
            p->phys_offset = phys_offset;
2338
            p->region_offset = region_offset;
2339
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2340
                (phys_offset & IO_MEM_ROMD)) {
2341
                phys_offset += TARGET_PAGE_SIZE;
2342
            } else {
2343
                target_phys_addr_t start_addr2, end_addr2;
2344
                int need_subpage = 0;
2345

    
2346
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2347
                              end_addr2, need_subpage);
2348

    
2349
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2350
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2351
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2352
                                           addr & TARGET_PAGE_MASK);
2353
                    subpage_register(subpage, start_addr2, end_addr2,
2354
                                     phys_offset, region_offset);
2355
                    p->region_offset = 0;
2356
                }
2357
            }
2358
        }
2359
        region_offset += TARGET_PAGE_SIZE;
2360
    }
2361

    
2362
    /* since each CPU stores ram addresses in its TLB cache, we must
2363
       reset the modified entries */
2364
    /* XXX: slow ! */
2365
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2366
        tlb_flush(env, 1);
2367
    }
2368
}
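
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): typical board code allocates guest RAM and maps it at physical
   address 0, then maps a ROM region read-only.
   cpu_register_physical_memory() is assumed to be the usual wrapper that
   passes a zero region_offset; ram_size/rom_size/rom_base are hypothetical. */
#if 0
    ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

    ram_addr_t rom_offset = qemu_ram_alloc(rom_size);
    cpu_register_physical_memory(rom_base, rom_size, rom_offset | IO_MEM_ROM);
#endif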
2369

    
2370
/* XXX: temporary until new memory mapping API */
2371
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2372
{
2373
    PhysPageDesc *p;
2374

    
2375
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2376
    if (!p)
2377
        return IO_MEM_UNASSIGNED;
2378
    return p->phys_offset;
2379
}
2380

    
2381
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2382
{
2383
    if (kvm_enabled())
2384
        kvm_coalesce_mmio_region(addr, size);
2385
}
2386

    
2387
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2388
{
2389
    if (kvm_enabled())
2390
        kvm_uncoalesce_mmio_region(addr, size);
2391
}
2392

    
2393
/* XXX: better than nothing */
2394
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2395
{
2396
    ram_addr_t addr;
2397
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2398
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2399
                (uint64_t)size, (uint64_t)phys_ram_size);
2400
        abort();
2401
    }
2402
    addr = phys_ram_alloc_offset;
2403
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2404
    return addr;
2405
}
2406

    
2407
void qemu_ram_free(ram_addr_t addr)
2408
{
2409
}
2410

    
2411
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2412
{
2413
#ifdef DEBUG_UNASSIGNED
2414
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2415
#endif
2416
#if defined(TARGET_SPARC)
2417
    do_unassigned_access(addr, 0, 0, 0, 1);
2418
#endif
2419
    return 0;
2420
}
2421

    
2422
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2423
{
2424
#ifdef DEBUG_UNASSIGNED
2425
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2426
#endif
2427
#if defined(TARGET_SPARC)
2428
    do_unassigned_access(addr, 0, 0, 0, 2);
2429
#endif
2430
    return 0;
2431
}
2432

    
2433
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2434
{
2435
#ifdef DEBUG_UNASSIGNED
2436
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2437
#endif
2438
#if defined(TARGET_SPARC)
2439
    do_unassigned_access(addr, 0, 0, 0, 4);
2440
#endif
2441
    return 0;
2442
}
2443

    
2444
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2445
{
2446
#ifdef DEBUG_UNASSIGNED
2447
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2448
#endif
2449
#if defined(TARGET_SPARC)
2450
    do_unassigned_access(addr, 1, 0, 0, 1);
2451
#endif
2452
}
2453

    
2454
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2455
{
2456
#ifdef DEBUG_UNASSIGNED
2457
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2458
#endif
2459
#if defined(TARGET_SPARC)
2460
    do_unassigned_access(addr, 1, 0, 0, 2);
2461
#endif
2462
}
2463

    
2464
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2465
{
2466
#ifdef DEBUG_UNASSIGNED
2467
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2468
#endif
2469
#if defined(TARGET_SPARC)
2470
    do_unassigned_access(addr, 1, 0, 0, 4);
2471
#endif
2472
}
2473

    
2474
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2475
    unassigned_mem_readb,
2476
    unassigned_mem_readw,
2477
    unassigned_mem_readl,
2478
};
2479

    
2480
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2481
    unassigned_mem_writeb,
2482
    unassigned_mem_writew,
2483
    unassigned_mem_writel,
2484
};
2485

    
2486
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2487
                                uint32_t val)
2488
{
2489
    int dirty_flags;
2490
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2491
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2492
#if !defined(CONFIG_USER_ONLY)
2493
        tb_invalidate_phys_page_fast(ram_addr, 1);
2494
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2495
#endif
2496
    }
2497
    stb_p(phys_ram_base + ram_addr, val);
2498
#ifdef USE_KQEMU
2499
    if (cpu_single_env->kqemu_enabled &&
2500
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2501
        kqemu_modify_page(cpu_single_env, ram_addr);
2502
#endif
2503
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2504
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2505
    /* we remove the notdirty callback only if the code has been
2506
       flushed */
2507
    if (dirty_flags == 0xff)
2508
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2509
}
2510

    
2511
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2512
                                uint32_t val)
2513
{
2514
    int dirty_flags;
2515
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2516
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2517
#if !defined(CONFIG_USER_ONLY)
2518
        tb_invalidate_phys_page_fast(ram_addr, 2);
2519
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2520
#endif
2521
    }
2522
    stw_p(phys_ram_base + ram_addr, val);
2523
#ifdef USE_KQEMU
2524
    if (cpu_single_env->kqemu_enabled &&
2525
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2526
        kqemu_modify_page(cpu_single_env, ram_addr);
2527
#endif
2528
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2529
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2530
    /* we remove the notdirty callback only if the code has been
2531
       flushed */
2532
    if (dirty_flags == 0xff)
2533
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2534
}
2535

    
2536
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2537
                                uint32_t val)
2538
{
2539
    int dirty_flags;
2540
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2541
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2542
#if !defined(CONFIG_USER_ONLY)
2543
        tb_invalidate_phys_page_fast(ram_addr, 4);
2544
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2545
#endif
2546
    }
2547
    stl_p(phys_ram_base + ram_addr, val);
2548
#ifdef USE_KQEMU
2549
    if (cpu_single_env->kqemu_enabled &&
2550
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2551
        kqemu_modify_page(cpu_single_env, ram_addr);
2552
#endif
2553
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2554
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2555
    /* we remove the notdirty callback only if the code has been
2556
       flushed */
2557
    if (dirty_flags == 0xff)
2558
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2559
}
2560

    
2561
static CPUReadMemoryFunc *error_mem_read[3] = {
2562
    NULL, /* never used */
2563
    NULL, /* never used */
2564
    NULL, /* never used */
2565
};
2566

    
2567
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2568
    notdirty_mem_writeb,
2569
    notdirty_mem_writew,
2570
    notdirty_mem_writel,
2571
};
2572

    
2573
/* Generate a debug exception if a watchpoint has been hit.  */
2574
static void check_watchpoint(int offset, int len_mask, int flags)
2575
{
2576
    CPUState *env = cpu_single_env;
2577
    target_ulong pc, cs_base;
2578
    TranslationBlock *tb;
2579
    target_ulong vaddr;
2580
    CPUWatchpoint *wp;
2581
    int cpu_flags;
2582

    
2583
    if (env->watchpoint_hit) {
2584
        /* We re-entered the check after replacing the TB. Now raise
2585
         * the debug interrupt so that it will trigger after the
2586
         * current instruction. */
2587
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2588
        return;
2589
    }
2590
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2591
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2592
        if ((vaddr == (wp->vaddr & len_mask) ||
2593
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2594
            wp->flags |= BP_WATCHPOINT_HIT;
2595
            if (!env->watchpoint_hit) {
2596
                env->watchpoint_hit = wp;
2597
                tb = tb_find_pc(env->mem_io_pc);
2598
                if (!tb) {
2599
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2600
                              "pc=%p", (void *)env->mem_io_pc);
2601
                }
2602
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2603
                tb_phys_invalidate(tb, -1);
2604
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2605
                    env->exception_index = EXCP_DEBUG;
2606
                } else {
2607
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2608
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2609
                }
2610
                cpu_resume_from_signal(env, NULL);
2611
            }
2612
        } else {
2613
            wp->flags &= ~BP_WATCHPOINT_HIT;
2614
        }
2615
    }
2616
}
2617

    
2618
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2619
   so these check for a hit then pass through to the normal out-of-line
2620
   phys routines.  */
2621
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2622
{
2623
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2624
    return ldub_phys(addr);
2625
}
2626

    
2627
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2628
{
2629
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2630
    return lduw_phys(addr);
2631
}
2632

    
2633
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2634
{
2635
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2636
    return ldl_phys(addr);
2637
}
2638

    
2639
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2640
                             uint32_t val)
2641
{
2642
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2643
    stb_phys(addr, val);
2644
}
2645

    
2646
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2647
                             uint32_t val)
2648
{
2649
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2650
    stw_phys(addr, val);
2651
}
2652

    
2653
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2654
                             uint32_t val)
2655
{
2656
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2657
    stl_phys(addr, val);
2658
}
2659

    
2660
static CPUReadMemoryFunc *watch_mem_read[3] = {
2661
    watch_mem_readb,
2662
    watch_mem_readw,
2663
    watch_mem_readl,
2664
};
2665

    
2666
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2667
    watch_mem_writeb,
2668
    watch_mem_writew,
2669
    watch_mem_writel,
2670
};
2671

    
2672
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2673
                                 unsigned int len)
2674
{
2675
    uint32_t ret;
2676
    unsigned int idx;
2677

    
2678
    idx = SUBPAGE_IDX(addr);
2679
#if defined(DEBUG_SUBPAGE)
2680
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2681
           mmio, len, addr, idx);
2682
#endif
2683
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2684
                                       addr + mmio->region_offset[idx][0][len]);
2685

    
2686
    return ret;
2687
}
2688

    
2689
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2690
                              uint32_t value, unsigned int len)
2691
{
2692
    unsigned int idx;
2693

    
2694
    idx = SUBPAGE_IDX(addr);
2695
#if defined(DEBUG_SUBPAGE)
2696
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2697
           mmio, len, addr, idx, value);
2698
#endif
2699
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2700
                                  addr + mmio->region_offset[idx][1][len],
2701
                                  value);
2702
}
2703

    
2704
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2705
{
2706
#if defined(DEBUG_SUBPAGE)
2707
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2708
#endif
2709

    
2710
    return subpage_readlen(opaque, addr, 0);
2711
}
2712

    
2713
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2714
                            uint32_t value)
2715
{
2716
#if defined(DEBUG_SUBPAGE)
2717
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2718
#endif
2719
    subpage_writelen(opaque, addr, value, 0);
2720
}
2721

    
2722
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2723
{
2724
#if defined(DEBUG_SUBPAGE)
2725
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2726
#endif
2727

    
2728
    return subpage_readlen(opaque, addr, 1);
2729
}
2730

    
2731
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2732
                            uint32_t value)
2733
{
2734
#if defined(DEBUG_SUBPAGE)
2735
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2736
#endif
2737
    subpage_writelen(opaque, addr, value, 1);
2738
}
2739

    
2740
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2741
{
2742
#if defined(DEBUG_SUBPAGE)
2743
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2744
#endif
2745

    
2746
    return subpage_readlen(opaque, addr, 2);
2747
}
2748

    
2749
static void subpage_writel (void *opaque,
2750
                         target_phys_addr_t addr, uint32_t value)
2751
{
2752
#if defined(DEBUG_SUBPAGE)
2753
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2754
#endif
2755
    subpage_writelen(opaque, addr, value, 2);
2756
}
2757

    
2758
static CPUReadMemoryFunc *subpage_read[] = {
2759
    &subpage_readb,
2760
    &subpage_readw,
2761
    &subpage_readl,
2762
};
2763

    
2764
static CPUWriteMemoryFunc *subpage_write[] = {
2765
    &subpage_writeb,
2766
    &subpage_writew,
2767
    &subpage_writel,
2768
};
2769

    
2770
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2771
                             ram_addr_t memory, ram_addr_t region_offset)
2772
{
2773
    int idx, eidx;
2774
    unsigned int i;
2775

    
2776
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2777
        return -1;
2778
    idx = SUBPAGE_IDX(start);
2779
    eidx = SUBPAGE_IDX(end);
2780
#if defined(DEBUG_SUBPAGE)
2781
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2782
           mmio, start, end, idx, eidx, memory);
2783
#endif
2784
    memory >>= IO_MEM_SHIFT;
2785
    for (; idx <= eidx; idx++) {
2786
        for (i = 0; i < 4; i++) {
2787
            if (io_mem_read[memory][i]) {
2788
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2789
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2790
                mmio->region_offset[idx][0][i] = region_offset;
2791
            }
2792
            if (io_mem_write[memory][i]) {
2793
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2794
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2795
                mmio->region_offset[idx][1][i] = region_offset;
2796
            }
2797
        }
2798
    }
2799

    
2800
    return 0;
2801
}
2802

    
2803
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2804
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2805
{
2806
    subpage_t *mmio;
2807
    int subpage_memory;
2808

    
2809
    mmio = qemu_mallocz(sizeof(subpage_t));
2810

    
2811
    mmio->base = base;
2812
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2813
#if defined(DEBUG_SUBPAGE)
2814
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2815
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2816
#endif
2817
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2818
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2819
                         region_offset);
2820

    
2821
    return mmio;
2822
}
2823

    
2824
static int get_free_io_mem_idx(void)
2825
{
2826
    int i;
2827

    
2828
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2829
        if (!io_mem_used[i]) {
2830
            io_mem_used[i] = 1;
2831
            return i;
2832
        }
2833

    
2834
    return -1;
2835
}
2836

    
2837
static void io_mem_init(void)
2838
{
2839
    int i;
2840

    
2841
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2842
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2843
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2844
    for (i=0; i<5; i++)
2845
        io_mem_used[i] = 1;
2846

    
2847
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2848
                                          watch_mem_write, NULL);
2849
    /* alloc dirty bits array */
2850
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2851
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2852
}
2853

    
2854
/* mem_read and mem_write are arrays of functions containing the
2855
   function to access byte (index 0), word (index 1) and dword (index
2856
   2). Functions can be omitted with a NULL function pointer. The
2857
   registered functions may be modified dynamically later.
2858
   If io_index is non zero, the corresponding io zone is
2859
   modified. If it is zero, a new io zone is allocated. The return
2860
   value can be used with cpu_register_physical_memory(). (-1) is
2861
   returned if error. */
2862
int cpu_register_io_memory(int io_index,
2863
                           CPUReadMemoryFunc **mem_read,
2864
                           CPUWriteMemoryFunc **mem_write,
2865
                           void *opaque)
2866
{
2867
    int i, subwidth = 0;
2868

    
2869
    if (io_index <= 0) {
2870
        io_index = get_free_io_mem_idx();
2871
        if (io_index == -1)
2872
            return io_index;
2873
    } else {
2874
        if (io_index >= IO_MEM_NB_ENTRIES)
2875
            return -1;
2876
    }
2877

    
2878
    for(i = 0;i < 3; i++) {
2879
        if (!mem_read[i] || !mem_write[i])
2880
            subwidth = IO_MEM_SUBWIDTH;
2881
        io_mem_read[io_index][i] = mem_read[i];
2882
        io_mem_write[io_index][i] = mem_write[i];
2883
    }
2884
    io_mem_opaque[io_index] = opaque;
2885
    return (io_index << IO_MEM_SHIFT) | subwidth;
2886
}
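
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): a device supplies one read and one write callback per access size,
   registers them, and maps the returned token over its MMIO window.  The
   mydev_* names, MYDEV_MMIO_BASE and the state pointer 's' are
   hypothetical; cpu_register_physical_memory() is assumed to be the usual
   wrapper with a zero region_offset. */
#if 0
    static CPUReadMemoryFunc *mydev_read[3] = {
        mydev_readb, mydev_readw, mydev_readl,
    };
    static CPUWriteMemoryFunc *mydev_write[3] = {
        mydev_writeb, mydev_writew, mydev_writel,
    };

    int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
    cpu_register_physical_memory(MYDEV_MMIO_BASE, 0x1000, io);
#endif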
2887

    
2888
void cpu_unregister_io_memory(int io_table_address)
2889
{
2890
    int i;
2891
    int io_index = io_table_address >> IO_MEM_SHIFT;
2892

    
2893
    for (i=0;i < 3; i++) {
2894
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2895
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2896
    }
2897
    io_mem_opaque[io_index] = NULL;
2898
    io_mem_used[io_index] = 0;
2899
}
2900

    
2901
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2902
{
2903
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2904
}
2905

    
2906
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2907
{
2908
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2909
}
2910

    
2911
#endif /* !defined(CONFIG_USER_ONLY) */
2912

    
2913
/* physical memory access (slow version, mainly for debug) */
2914
#if defined(CONFIG_USER_ONLY)
2915
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2916
                            int len, int is_write)
2917
{
2918
    int l, flags;
2919
    target_ulong page;
2920
    void * p;
2921

    
2922
    while (len > 0) {
2923
        page = addr & TARGET_PAGE_MASK;
2924
        l = (page + TARGET_PAGE_SIZE) - addr;
2925
        if (l > len)
2926
            l = len;
2927
        flags = page_get_flags(page);
2928
        if (!(flags & PAGE_VALID))
2929
            return;
2930
        if (is_write) {
2931
            if (!(flags & PAGE_WRITE))
2932
                return;
2933
            /* XXX: this code should not depend on lock_user */
2934
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2935
                /* FIXME - should this return an error rather than just fail? */
2936
                return;
2937
            memcpy(p, buf, l);
2938
            unlock_user(p, addr, l);
2939
        } else {
2940
            if (!(flags & PAGE_READ))
2941
                return;
2942
            /* XXX: this code should not depend on lock_user */
2943
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2944
                /* FIXME - should this return an error rather than just fail? */
2945
                return;
2946
            memcpy(buf, p, l);
2947
            unlock_user(p, addr, 0);
2948
        }
2949
        len -= l;
2950
        buf += l;
2951
        addr += l;
2952
    }
2953
}
2954

    
2955
#else
2956
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2957
                            int len, int is_write)
2958
{
2959
    int l, io_index;
2960
    uint8_t *ptr;
2961
    uint32_t val;
2962
    target_phys_addr_t page;
2963
    unsigned long pd;
2964
    PhysPageDesc *p;
2965

    
2966
    while (len > 0) {
2967
        page = addr & TARGET_PAGE_MASK;
2968
        l = (page + TARGET_PAGE_SIZE) - addr;
2969
        if (l > len)
2970
            l = len;
2971
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2972
        if (!p) {
2973
            pd = IO_MEM_UNASSIGNED;
2974
        } else {
2975
            pd = p->phys_offset;
2976
        }
2977

    
2978
        if (is_write) {
2979
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2980
                target_phys_addr_t addr1 = addr;
2981
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2982
                if (p)
2983
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2984
                /* XXX: could force cpu_single_env to NULL to avoid
2985
                   potential bugs */
2986
                if (l >= 4 && ((addr1 & 3) == 0)) {
2987
                    /* 32 bit write access */
2988
                    val = ldl_p(buf);
2989
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
2990
                    l = 4;
2991
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
2992
                    /* 16 bit write access */
2993
                    val = lduw_p(buf);
2994
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
2995
                    l = 2;
2996
                } else {
2997
                    /* 8 bit write access */
2998
                    val = ldub_p(buf);
2999
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3000
                    l = 1;
3001
                }
3002
            } else {
3003
                unsigned long addr1;
3004
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3005
                /* RAM case */
3006
                ptr = phys_ram_base + addr1;
3007
                memcpy(ptr, buf, l);
3008
                if (!cpu_physical_memory_is_dirty(addr1)) {
3009
                    /* invalidate code */
3010
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3011
                    /* set dirty bit */
3012
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3013
                        (0xff & ~CODE_DIRTY_FLAG);
3014
                }
3015
            }
3016
        } else {
3017
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3018
                !(pd & IO_MEM_ROMD)) {
3019
                target_phys_addr_t addr1 = addr;
3020
                /* I/O case */
3021
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3022
                if (p)
3023
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3024
                if (l >= 4 && ((addr1 & 3) == 0)) {
3025
                    /* 32 bit read access */
3026
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3027
                    stl_p(buf, val);
3028
                    l = 4;
3029
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3030
                    /* 16 bit read access */
3031
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3032
                    stw_p(buf, val);
3033
                    l = 2;
3034
                } else {
3035
                    /* 8 bit read access */
3036
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3037
                    stb_p(buf, val);
3038
                    l = 1;
3039
                }
3040
            } else {
3041
                /* RAM case */
3042
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3043
                    (addr & ~TARGET_PAGE_MASK);
3044
                memcpy(buf, ptr, l);
3045
            }
3046
        }
3047
        len -= l;
3048
        buf += l;
3049
        addr += l;
3050
    }
3051
}
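
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): monitor- and gdbstub-style callers go through this slow path;
   is_write selects the direction.  'paddr' is hypothetical. */
#if 0
    uint32_t word;

    cpu_physical_memory_rw(paddr, (uint8_t *)&word, sizeof(word), 0); /* read  */
    cpu_physical_memory_rw(paddr, (uint8_t *)&word, sizeof(word), 1); /* write */
#endif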
3052

    
3053
/* used for ROM loading : can write in RAM and ROM */
3054
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3055
                                   const uint8_t *buf, int len)
3056
{
3057
    int l;
3058
    uint8_t *ptr;
3059
    target_phys_addr_t page;
3060
    unsigned long pd;
3061
    PhysPageDesc *p;
3062

    
3063
    while (len > 0) {
3064
        page = addr & TARGET_PAGE_MASK;
3065
        l = (page + TARGET_PAGE_SIZE) - addr;
3066
        if (l > len)
3067
            l = len;
3068
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3069
        if (!p) {
3070
            pd = IO_MEM_UNASSIGNED;
3071
        } else {
3072
            pd = p->phys_offset;
3073
        }
3074

    
3075
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3076
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3077
            !(pd & IO_MEM_ROMD)) {
3078
            /* do nothing */
3079
        } else {
3080
            unsigned long addr1;
3081
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3082
            /* ROM/RAM case */
3083
            ptr = phys_ram_base + addr1;
3084
            memcpy(ptr, buf, l);
3085
        }
3086
        len -= l;
3087
        buf += l;
3088
        addr += l;
3089
    }
3090
}
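
/* Illustrative usage sketch (not part of the original file, kept compiled
   out): firmware loaders use this variant because the normal rw path
   refuses to write into pages registered as ROM.  bios_base, bios_data and
   bios_size are hypothetical. */
#if 0
    cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
#endif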
3091

    
3092
typedef struct {
3093
    void *buffer;
3094
    target_phys_addr_t addr;
3095
    target_phys_addr_t len;
3096
} BounceBuffer;
3097

    
3098
static BounceBuffer bounce;
3099

    
3100
typedef struct MapClient {
3101
    void *opaque;
3102
    void (*callback)(void *opaque);
3103
    LIST_ENTRY(MapClient) link;
3104
} MapClient;
3105

    
3106
static LIST_HEAD(map_client_list, MapClient) map_client_list
3107
    = LIST_HEAD_INITIALIZER(map_client_list);
3108

    
3109
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3110
{
3111
    MapClient *client = qemu_malloc(sizeof(*client));
3112

    
3113
    client->opaque = opaque;
3114
    client->callback = callback;
3115
    LIST_INSERT_HEAD(&map_client_list, client, link);
3116
    return client;
3117
}
3118

    
3119
void cpu_unregister_map_client(void *_client)
3120
{
3121
    MapClient *client = (MapClient *)_client;
3122

    
3123
    LIST_REMOVE(client, link);
3124
}
3125

    
3126
static void cpu_notify_map_clients(void)
3127
{
3128
    MapClient *client;
3129

    
3130
    while (!LIST_EMPTY(&map_client_list)) {
3131
        client = LIST_FIRST(&map_client_list);
3132
        client->callback(client->opaque);
3133
        LIST_REMOVE(client, link);
3134
    }
3135
}
3136

    
3137
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = phys_ram_base + addr1;
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

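/* Example (illustrative sketch only, kept under #if 0 and never compiled):
   one way a device model might drive cpu_physical_memory_map() /
   cpu_physical_memory_unmap() for a DMA-style transfer, falling back to
   cpu_register_map_client() when the single bounce buffer is busy.  The
   names do_dma_example() and dma_retry_example() are hypothetical; only the
   map/unmap/register functions above are real. */
#if 0
static void dma_retry_example(void *opaque);

static void do_dma_example(target_phys_addr_t dma_addr,
                           target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    /* ask for a host pointer covering the guest-physical range; the call
       may shorten plen or return NULL if the bounce buffer is in use */
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);
    if (!host) {
        /* be notified (once) when the bounce buffer is released */
        cpu_register_map_client(NULL, dma_retry_example);
        return;
    }
    memset(host, 0, plen);              /* device writes into guest memory */
    /* is_write == 1: mark the pages dirty and invalidate translated code */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}

static void dma_retry_example(void *opaque)
{
    /* re-issue the blocked transfer here, e.g. call do_dma_example() again */
}
#endif
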
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

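/* Example (illustrative sketch only, kept under #if 0 and never compiled):
   how a target MMU helper might use stl_phys_notdirty() while walking guest
   page tables.  Setting accessed/dirty flags in a PTE this way does not flag
   the RAM page as dirty, so the dirty bitmap keeps tracking guest-visible
   writes only.  update_pte_flags_example() and the 0x20/0x40 masks are
   hypothetical placeholders for target-specific definitions. */
#if 0
static void update_pte_flags_example(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);      /* read the guest PTE */
    pte |= 0x20 | 0x40;                     /* e.g. accessed + dirty bits */
    /* write back without dirtying the page or invalidating TBs */
    stl_phys_notdirty(pte_addr, pte);
}
#endif
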
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

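/* Example (illustrative sketch only, kept under #if 0 and never compiled):
   reading a word of guest memory through the CPU's virtual address space,
   the way a debugger stub might.  read_guest_word_example() is a
   hypothetical helper; only cpu_memory_rw_debug() and ldl_p() are real. */
#if 0
static uint32_t read_guest_word_example(CPUState *env, target_ulong vaddr)
{
    uint8_t buf[4];
    /* cpu_memory_rw_debug() walks the guest page tables via
       cpu_get_phys_page_debug() and fails if vaddr is not mapped */
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return 0;
    return ldl_p(buf);                      /* value in target byte order */
}
#endif
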
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

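/* Example (illustrative sketch only, kept under #if 0 and never compiled):
   dump_exec_info() only needs a FILE pointer and a printf-like callback, so
   plain fprintf() is enough when dumping to stderr; the monitor normally
   supplies its own callback instead.  print_tb_stats_example() is a
   hypothetical helper. */
#if 0
static void print_tb_stats_example(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
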
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif