/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
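
/* Example: with TARGET_PAGE_SIZE == 4096 and a 64 KiB host page,
   page_init() leaves qemu_host_page_bits == 16 and qemu_host_page_mask ==
   ~0xffff, so one host page covers 16 target pages; code that protects
   guest pages (see tb_alloc_page below) must therefore operate on whole
   host pages. */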

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
        return NULL;
#endif
    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
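
/* Worked example of the two-level lookup: with L2_BITS == 10 and 4 KiB
   target pages, virtual address 0x12345678 has page index
   0x12345678 >> 12 == 0x12345; page_find() then reads
   l1_map[0x12345 >> 10] (slot 0x48) and indexes the resulting PageDesc
   array with 0x12345 & 0x3ff (entry 0x345). */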

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

    
424
/* Must be called before using the QEMU cpus. 'tb_size' is the size
425
   (in bytes) allocated to the translation buffer. Zero means default
426
   size. */
427
void cpu_exec_init_all(unsigned long tb_size)
428
{
429
    cpu_gen_init();
430
    code_gen_alloc(tb_size);
431
    code_gen_ptr = code_gen_buffer;
432
    page_init();
433
#if !defined(CONFIG_USER_ONLY)
434
    io_mem_init();
435
#endif
436
}
437

    
438
void cpu_exec_init(CPUState *env)
439
{
440
    CPUState **penv;
441
    int cpu_index;
442

    
443
    env->next_cpu = NULL;
444
    penv = &first_cpu;
445
    cpu_index = 0;
446
    while (*penv != NULL) {
447
        penv = (CPUState **)&(*penv)->next_cpu;
448
        cpu_index++;
449
    }
450
    env->cpu_index = cpu_index;
451
    env->nb_watchpoints = 0;
452
    *penv = env;
453
}
454

    
455
static inline void invalidate_page_bitmap(PageDesc *p)
456
{
457
    if (p->code_bitmap) {
458
        qemu_free(p->code_bitmap);
459
        p->code_bitmap = NULL;
460
    }
461
    p->code_write_count = 0;
462
}
463

    
464
/* set to NULL all the 'first_tb' fields in all PageDescs */
465
static void page_flush_tb(void)
466
{
467
    int i, j;
468
    PageDesc *p;
469

    
470
    for(i = 0; i < L1_SIZE; i++) {
471
        p = l1_map[i];
472
        if (p) {
473
            for(j = 0; j < L2_SIZE; j++) {
474
                p->first_tb = NULL;
475
                invalidate_page_bitmap(p);
476
                p++;
477
            }
478
        }
479
    }
480
}
481

    
482
/* flush all the translation blocks */
483
/* XXX: tb_flush is currently not thread safe */
484
void tb_flush(CPUState *env1)
485
{
486
    CPUState *env;
487
#if defined(DEBUG_FLUSH)
488
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
489
           (unsigned long)(code_gen_ptr - code_gen_buffer),
490
           nb_tbs, nb_tbs > 0 ?
491
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
492
#endif
493
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
494
        cpu_abort(env1, "Internal error: code buffer overflow\n");
495

    
496
    nb_tbs = 0;
497

    
498
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
499
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
500
    }
501

    
502
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
503
    page_flush_tb();
504

    
505
    code_gen_ptr = code_gen_buffer;
506
    /* XXX: flush processor icache at this point if cache flush is
507
       expensive */
508
    tb_flush_count++;
509
}
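
/* tb_flush() throws away every translation at once instead of evicting
   TBs one by one: code_gen_ptr is reset to the start of the buffer and
   all hash tables, per-page TB lists and per-CPU tb_jmp_cache entries
   are cleared, so any stale pointer into the old code would be a bug.
   It is triggered from tb_gen_code() when tb_alloc() reports that the
   buffer or the tbs[] array is full. */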

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
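
/* The TB lists above use tagged pointers: the low two bits of each
   TranslationBlock pointer stored in page_next[]/jmp_next[] encode which
   slot of the pointed-to TB continues the chain (0 or 1, since a TB can
   span two pages and has two jump slots), while the value 2 marks the
   list head itself (tb | 2 in jmp_first) and thus the end of the
   circular list.  Hence the recurring "(long)tb & 3" / "& ~3" idiom. */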

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
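
/* Example: set_bits(tab, 3, 7) marks bits 3..9.  The first byte gets the
   partial mask 0xf8 (bits 3-7), no full 0xff bytes are needed, and the
   second byte gets ~(0xff << 2) = 0x03 (bits 8-9). */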

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}
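
/* Self-modifying code handling in a nutshell: a write into a page that
   contains translated code lands here, every TB overlapping [start;end[
   is invalidated, and if the write came from the TB that is currently
   executing (precise SMC), that TB is regenerated as a single-instruction
   block (CF_SINGLE_INSN) so it cannot invalidate itself again. */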

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
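
/* The bitmap built by build_page_bitmap() has one bit per byte of the
   page.  For a 4-byte write at page offset 0x123 the test above reads
   code_bitmap[0x24], shifts right by 3 and checks the low 4 bits, i.e.
   whether any of bytes 0x123..0x126 belong to a TB; if none do, the
   write cannot touch translated code and the slow path is skipped. */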

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {
            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}
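
/* tb_alloc() only hands out a TB when both limits hold: a free slot in
   tbs[] and enough room left in the code buffer for a worst-case block
   (code_gen_buffer_max_size was reduced by code_gen_max_block_size() in
   code_gen_alloc() precisely so this check is safe).  A NULL return is
   the caller's cue to tb_flush() and retry. */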

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
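
/* A TB is reachable three ways once linked: through tb_phys_hash[]
   (keyed by physical PC, used after a tb_jmp_cache miss), through the
   per-page first_tb lists (used for invalidation on writes), and through
   direct jump chaining between TBs.  tb_link_phys() establishes the
   first two and initializes the jump slots to "not chained"; an offset
   of 0xffff marks a jump slot the block does not use. */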

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
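
/* The binary search works because TBs are carved out of the code buffer
   in allocation order, so tbs[0].tc_ptr < tbs[1].tc_ptr < ...; when no
   tc_ptr matches exactly, tbs[m_max] is the last TB starting at or below
   tc_ptr, i.e. the block that contains the host address.  This is how a
   host PC (e.g. the faulting PC recorded in env->mem_write_pc) is mapped
   back to the TB being executed. */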

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif

    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
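
/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" selects every entry in
   cpu_log_items[], and an unknown name makes the whole call return 0. */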

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
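
/* The software TLB is direct-mapped per MMU mode: a virtual address
   selects entry (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), so
   tlb_flush_page() only needs to clear that one index in each mode's
   table.  Setting addr_read/addr_write/addr_code to -1 makes the next
   access miss and take the slow refill path (tlb_set_page_exec). */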

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}
1695

    
1696
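/* Usage sketch (illustrative only; redraw_page and the fb_* names are
   hypothetical): a display device tracking framebuffer writes through
   the dirty bitmap would typically do
       if (cpu_physical_memory_get_dirty(page, VGA_DIRTY_FLAG))
           redraw_page(page);
       ...
       cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
   CODE_DIRTY_FLAG should not be cleared this way: it is owned by the TB
   machinery via tlb_protect_code()/tlb_unprotect_code_phys() above. */
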
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}

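/* Usage sketch (illustrative only): a target's tlb_fill() handler
   normally resolves the guest virtual address with its MMU code and
   then installs the mapping with something like
       ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                               paddr & TARGET_PAGE_MASK,
                               prot, mmu_idx, is_softmmu);
   where prot is a combination of PAGE_READ/PAGE_WRITE/PAGE_EXEC.
   Targets usually reach this through a tlb_set_page() wrapper rather
   than calling it directly. */
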
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

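/* Worked example for CHECK_SUBPAGE (illustrative only), assuming 4 KB
   target pages: registering a region with start_addr = 0x1100 and
   orig_size = 0x600 gives, for the page at addr = 0x1000,
   start_addr2 = 0x100 and end_addr2 = 0x6ff, so need_subpage is set and
   only bytes 0x100..0x6ff of that page are routed to the new handlers. */
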
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

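/* Usage sketch (illustrative only; variable names and addresses are made
   up, real call sites live in the board models under hw/): board code
   typically allocates guest RAM and ROM and then maps them, e.g.
       ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
       bios_offset = qemu_ram_alloc(bios_size);
       cpu_register_physical_memory(0xfffe0000, bios_size,
                                    bios_offset | IO_MEM_ROM);
*/
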
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

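/* Editor's note: this is a simple bump allocator over the preallocated
   phys_ram_base area; qemu_ram_free() below is deliberately a no-op,
   so allocations are permanent for the lifetime of the VM. */
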
void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
}

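/* Editor's note on the three writers above: IO_MEM_NOTDIRTY is the slow
   path installed by tlb_set_page_exec() for clean RAM pages.  On a write
   it invalidates any TBs derived from the page, sets every dirty flag
   except CODE_DIRTY_FLAG, performs the store, and re-enables the fast
   path via tlb_set_dirty() once no translated code remains on the page
   (dirty_flags == 0xff). */
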
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
                && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

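/* Editor's note: watchpointed pages are entered into the TLB with
   TLB_MMIO set (see tlb_set_page_exec() above), so every access is
   forced through these handlers; check_watchpoint() raises
   CPU_INTERRUPT_DEBUG on a hit and the access then falls through to
   the ordinary physical-memory helpers. */
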
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

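/* Example (illustrative only): if a device registers a 0x100-byte MMIO
   region at offset 0x800 of an otherwise RAM-backed page,
   cpu_register_physical_memory() ends up here; the page's PhysPageDesc
   is switched to IO_MEM_SUBPAGE, the whole page is first seeded with the
   original callbacks, and the subpage table then dispatches bytes
   0x800..0x8ff to the device handlers while the rest of the page keeps
   its previous behaviour. */
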
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

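/* Usage sketch (illustrative only; the mydev_* names are hypothetical,
   real examples live in hw/): a device model registers its callbacks and
   maps the result, e.g.
       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       iomemtype = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, iomemtype);
*/
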
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

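/* Usage note: most callers go through the cpu_physical_memory_read()
   and cpu_physical_memory_write() wrappers (see cpu-all.h), as the
   ldub_phys()/stb_phys() helpers further down in this file do; DMA-style
   device code uses the same entry point to copy to and from guest
   memory. */
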
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

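/* Editor's note: dump_exec_info() is the backend of the monitor's
   "info jit" command, which is the usual way to read these counters at
   runtime. */
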
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

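/* Editor's note: softmmu_template.h is included once per access size
   (SHIFT 0..3 for 1, 2, 4 and 8 byte accesses).  With
   SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu defined, each inclusion
   expands to the code-fetch helpers (the _cmmu variants, e.g.
   __ldb_cmmu) used when translating guest code, as opposed to the
   data-access variants generated from the same template elsewhere. */
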
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif