
root / exec.c @ b67d9a52


/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

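/* Note: a subpage_t dispatches memory accesses at sub-page granularity,
   so that several I/O regions smaller than TARGET_PAGE_SIZE can share a
   single target page; each byte offset within the page has its own
   read/write handler slots. */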
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

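/* Note: l1_phys_map is a one level table when the physical address space
   fits in 32 bits and gains an extra indirection level above that (see
   the TARGET_PHYS_ADDR_SPACE_BITS test below); each leaf entry holds the
   PhysPageDesc of one target page. */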
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

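/* Note: the low two bits of the TB pointers stored in page_next[],
   jmp_next[] and jmp_first encode which slot (0 or 1) of the pointed-to
   TB the link goes through; tag value 2 marks the owning TB itself and
   terminates the circular jump list. The '& 3' / '& ~3' arithmetic below
   extracts and clears that tag. */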
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

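/* Note: code_bitmap holds one bit per byte of the target page; a set bit
   means that byte is covered by at least one TB. tb_invalidate_phys_page_fast()
   uses it to skip the costly invalidation when a write does not actually
   touch translated code. */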
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
                for(item = cpu_log_items; item->mask != 0; item++) {
                        mask |= item->mask;
                }
        } else {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

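/* Note: clearing the dirty flags retags matching RAM entries in the TLB
   from IO_MEM_RAM to IO_MEM_NOTDIRTY, so the next write to such a page is
   forced through the slow path, which can set the dirty bits again (see
   the comment in cpu_physical_memory_reset_dirty() below). */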
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (te->addr_code != -1) {
            tlb_flush_jmp_cache(env, te->addr_code);
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1764
            } else {
1765
                te->addr_write = address;
1766
            }
1767
        } else {
1768
            te->addr_write = -1;
1769
        }
1770
    }
1771
#if !defined(CONFIG_SOFTMMU)
1772
    else {
1773
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1774
            /* IO access: no mapping is done as it will be handled by the
1775
               soft MMU */
1776
            if (!(env->hflags & HF_SOFTMMU_MASK))
1777
                ret = 2;
1778
        } else {
1779
            void *map_addr;
1780

    
1781
            if (vaddr >= MMAP_AREA_END) {
1782
                ret = 2;
1783
            } else {
1784
                if (prot & PROT_WRITE) {
1785
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1786
#if defined(TARGET_HAS_SMC) || 1
1787
                        first_tb ||
1788
#endif
1789
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1790
                         !cpu_physical_memory_is_dirty(pd))) {
1791
                        /* ROM: we act as if code were inside */
                        /* if code is present, we only map the page read-only
                           and save the original mapping */
1794
                        VirtPageDesc *vp;
1795

    
1796
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1797
                        vp->phys_addr = pd;
1798
                        vp->prot = prot;
1799
                        vp->valid_tag = virt_valid_tag;
1800
                        prot &= ~PAGE_WRITE;
1801
                    }
1802
                }
1803
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1804
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1805
                if (map_addr == MAP_FAILED) {
1806
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
1807
                              paddr, vaddr);
1808
                }
1809
            }
1810
        }
1811
    }
1812
#endif
1813
    return ret;
1814
}
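
/* Illustrative summary (not part of the original code): how the low bits of
   a TLB entry set up by tlb_set_page_exec() above steer write handling.
   The three cases below simply restate the assignments made in the function:

       RAM page, already dirty:   te->addr_write = vaddr
                                  (plain host store through te->addend)
       RAM page, still clean:     te->addr_write = vaddr | IO_MEM_NOTDIRTY
                                  (store goes through notdirty_mem_write*())
       ROM/ROMD or MMIO page:     te->addr_write = vaddr | io-index bits
                                  (store calls the registered I/O callback)

   A nonzero value in the low TARGET_PAGE_BITS therefore forces the softmmu
   store helpers off the fast path and into the handlers further below. */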
1815

    
1816
/* called from signal handler: invalidate the code and unprotect the
1817
   page. Return TRUE if the fault was successfully handled. */
1818
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1819
{
1820
#if !defined(CONFIG_SOFTMMU)
1821
    VirtPageDesc *vp;
1822

    
1823
#if defined(DEBUG_TLB)
1824
    printf("page_unprotect: addr=0x%08x\n", addr);
1825
#endif
1826
    addr &= TARGET_PAGE_MASK;
1827

    
1828
    /* if it is not mapped, no need to worry here */
1829
    if (addr >= MMAP_AREA_END)
1830
        return 0;
1831
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1832
    if (!vp)
1833
        return 0;
1834
    /* NOTE: in this case, validate_tag is _not_ tested as it
1835
       validates only the code TLB */
1836
    if (vp->valid_tag != virt_valid_tag)
1837
        return 0;
1838
    if (!(vp->prot & PAGE_WRITE))
1839
        return 0;
1840
#if defined(DEBUG_TLB)
1841
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1842
           addr, vp->phys_addr, vp->prot);
1843
#endif
1844
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1845
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1846
                  (unsigned long)addr, vp->prot);
1847
    /* set the dirty bit */
1848
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1849
    /* flush the code inside */
1850
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1851
    return 1;
1852
#else
1853
    return 0;
1854
#endif
1855
}
1856

    
1857
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
1873

    
1874
/* dump memory mappings */
1875
void page_dump(FILE *f)
1876
{
1877
    unsigned long start, end;
1878
    int i, j, prot, prot1;
1879
    PageDesc *p;
1880

    
1881
    fprintf(f, "%-8s %-8s %-8s %s\n",
1882
            "start", "end", "size", "prot");
1883
    start = -1;
1884
    end = -1;
1885
    prot = 0;
1886
    for(i = 0; i <= L1_SIZE; i++) {
1887
        if (i < L1_SIZE)
1888
            p = l1_map[i];
1889
        else
1890
            p = NULL;
1891
        for(j = 0;j < L2_SIZE; j++) {
1892
            if (!p)
1893
                prot1 = 0;
1894
            else
1895
                prot1 = p[j].flags;
1896
            if (prot1 != prot) {
1897
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1898
                if (start != -1) {
1899
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1900
                            start, end, end - start,
1901
                            prot & PAGE_READ ? 'r' : '-',
1902
                            prot & PAGE_WRITE ? 'w' : '-',
1903
                            prot & PAGE_EXEC ? 'x' : '-');
1904
                }
1905
                if (prot1 != 0)
1906
                    start = end;
1907
                else
1908
                    start = -1;
1909
                prot = prot1;
1910
            }
1911
            if (!p)
1912
                break;
1913
        }
1914
    }
1915
}
1916

    
1917
int page_get_flags(target_ulong address)
1918
{
1919
    PageDesc *p;
1920

    
1921
    p = page_find(address >> TARGET_PAGE_BITS);
1922
    if (!p)
1923
        return 0;
1924
    return p->flags;
1925
}
1926

    
1927
/* modify the flags of a page and invalidate the code if
1928
   necessary. The flag PAGE_WRITE_ORG is set automatically
1929
   depending on PAGE_WRITE */
1930
void page_set_flags(target_ulong start, target_ulong end, int flags)
1931
{
1932
    PageDesc *p;
1933
    target_ulong addr;
1934

    
1935
    start = start & TARGET_PAGE_MASK;
1936
    end = TARGET_PAGE_ALIGN(end);
1937
    if (flags & PAGE_WRITE)
1938
        flags |= PAGE_WRITE_ORG;
1939
    spin_lock(&tb_lock);
1940
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1941
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1942
        /* if the write protection is set, then we invalidate the code
1943
           inside */
1944
        if (!(p->flags & PAGE_WRITE) &&
1945
            (flags & PAGE_WRITE) &&
1946
            p->first_tb) {
1947
            tb_invalidate_phys_page(addr, 0, NULL);
1948
        }
1949
        p->flags = flags;
1950
    }
1951
    spin_unlock(&tb_lock);
1952
}
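
/* Usage sketch (illustrative, not from this file): roughly what the
   user-mode mmap/mprotect emulation does after changing a guest mapping.
   The start, len and prot variables are hypothetical.

       int page_flags = PAGE_VALID | PAGE_READ;
       if (prot & PROT_WRITE)
           page_flags |= PAGE_WRITE;
       if (prot & PROT_EXEC)
           page_flags |= PAGE_EXEC;
       page_set_flags(start, start + len, page_flags);

   PAGE_WRITE_ORG is added automatically whenever PAGE_WRITE is requested,
   so a page later write-protected because it holds translated code can be
   recovered by page_unprotect() below. */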
1953

    
1954
int page_check_range(target_ulong start, target_ulong len, int flags)
1955
{
1956
    PageDesc *p;
1957
    target_ulong end;
1958
    target_ulong addr;
1959

    
1960
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
1961
    start = start & TARGET_PAGE_MASK;
1962

    
1963
    if( end < start )
1964
        /* we've wrapped around */
1965
        return -1;
1966
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1967
        p = page_find(addr >> TARGET_PAGE_BITS);
1968
        if( !p )
1969
            return -1;
1970
        if( !(p->flags & PAGE_VALID) )
1971
            return -1;
1972

    
1973
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1974
            return -1;
1975
        if (flags & PAGE_WRITE) {
1976
            if (!(p->flags & PAGE_WRITE_ORG))
1977
                return -1;
1978
            /* unprotect the page if it was put read-only because it
1979
               contains translated code */
1980
            if (!(p->flags & PAGE_WRITE)) {
1981
                if (!page_unprotect(addr, 0, NULL))
1982
                    return -1;
1983
            }
1984
            return 0;
1985
        }
1986
    }
1987
    return 0;
1988
}
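
/* Usage sketch (illustrative): a syscall emulation layer can validate a
   guest buffer before touching it.  The addr and len variables are
   hypothetical, and the EFAULT-style return value is the caller's
   convention, not this file's.

       if (page_check_range(addr, len, PAGE_READ | PAGE_WRITE) < 0)
           return -EFAULT;

   A failed PAGE_WRITE check can also mean the page was made read-only
   because it holds translated code; page_check_range() already calls
   page_unprotect() to recover from that case before reporting failure. */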
1989

    
1990
/* called from signal handler: invalidate the code and unprotect the
1991
   page. Return TRUE if the fault was successfully handled. */
1992
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1993
{
1994
    unsigned int page_index, prot, pindex;
1995
    PageDesc *p, *p1;
1996
    target_ulong host_start, host_end, addr;
1997

    
1998
    host_start = address & qemu_host_page_mask;
1999
    page_index = host_start >> TARGET_PAGE_BITS;
2000
    p1 = page_find(page_index);
2001
    if (!p1)
2002
        return 0;
2003
    host_end = host_start + qemu_host_page_size;
2004
    p = p1;
2005
    prot = 0;
2006
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2007
        prot |= p->flags;
2008
        p++;
2009
    }
2010
    /* if the page was really writable, then we change its
2011
       protection back to writable */
2012
    if (prot & PAGE_WRITE_ORG) {
2013
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2014
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2015
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2016
                     (prot & PAGE_BITS) | PAGE_WRITE);
2017
            p1[pindex].flags |= PAGE_WRITE;
2018
            /* and since the content will be modified, we must invalidate
2019
               the corresponding translated code. */
2020
            tb_invalidate_phys_page(address, pc, puc);
2021
#ifdef DEBUG_TB_CHECK
2022
            tb_invalidate_check(address);
2023
#endif
2024
            return 1;
2025
        }
2026
    }
2027
    return 0;
2028
}
2029

    
2030
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */
2035

    
2036
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2037
                             ram_addr_t memory);
2038
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2039
                           ram_addr_t orig_memory);
2040
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2041
                      need_subpage)                                     \
2042
    do {                                                                \
2043
        if (addr > start_addr)                                          \
2044
            start_addr2 = 0;                                            \
2045
        else {                                                          \
2046
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2047
            if (start_addr2 > 0)                                        \
2048
                need_subpage = 1;                                       \
2049
        }                                                               \
2050
                                                                        \
2051
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2052
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2053
        else {                                                          \
2054
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2055
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2056
                need_subpage = 1;                                       \
2057
        }                                                               \
2058
    } while (0)
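
/* Worked example (illustrative): assuming 4 KiB target pages, registering a
   small device at an unaligned address shows what CHECK_SUBPAGE computes.

       start_addr = 0x10000100, orig_size = 0x200, first loop addr = start_addr

       addr > start_addr is false    ->  start_addr2 = 0x100, need_subpage = 1
       remaining size 0x200 < page   ->  end_addr2   = 0x2ff, need_subpage = 1

   cpu_register_physical_memory() below then calls subpage_register() for
   offsets 0x100..0x2ff of that page, and the rest of the page keeps its
   previous (or unassigned) handler. */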
2059

    
2060
/* register physical memory. 'size' must be a multiple of the target
2061
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2062
   io memory page */
2063
void cpu_register_physical_memory(target_phys_addr_t start_addr,
2064
                                  ram_addr_t size,
2065
                                  ram_addr_t phys_offset)
2066
{
2067
    target_phys_addr_t addr, end_addr;
2068
    PhysPageDesc *p;
2069
    CPUState *env;
2070
    ram_addr_t orig_size = size;
2071
    void *subpage;
2072

    
2073
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2074
    end_addr = start_addr + (target_phys_addr_t)size;
2075
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2076
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2077
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2078
            ram_addr_t orig_memory = p->phys_offset;
2079
            target_phys_addr_t start_addr2, end_addr2;
2080
            int need_subpage = 0;
2081

    
2082
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2083
                          need_subpage);
2084
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2085
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2086
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2087
                                           &p->phys_offset, orig_memory);
2088
                } else {
2089
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2090
                                            >> IO_MEM_SHIFT];
2091
                }
2092
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2093
            } else {
2094
                p->phys_offset = phys_offset;
2095
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2096
                    (phys_offset & IO_MEM_ROMD))
2097
                    phys_offset += TARGET_PAGE_SIZE;
2098
            }
2099
        } else {
2100
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2101
            p->phys_offset = phys_offset;
2102
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2103
                (phys_offset & IO_MEM_ROMD))
2104
                phys_offset += TARGET_PAGE_SIZE;
2105
            else {
2106
                target_phys_addr_t start_addr2, end_addr2;
2107
                int need_subpage = 0;
2108

    
2109
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2110
                              end_addr2, need_subpage);
2111

    
2112
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2113
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2114
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2115
                    subpage_register(subpage, start_addr2, end_addr2,
2116
                                     phys_offset);
2117
                }
2118
            }
2119
        }
2120
    }
2121

    
2122
    /* since each CPU stores ram addresses in its TLB cache, we must
2123
       reset the modified entries */
2124
    /* XXX: slow ! */
2125
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2126
        tlb_flush(env, 1);
2127
    }
2128
}
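
/* Usage sketch (illustrative, not from this file): a board model typically
   allocates guest RAM with qemu_ram_alloc() below and maps it here; the
   size and base address are made up.

       ram_addr_t ram_offset = qemu_ram_alloc(8 * 1024 * 1024);
       cpu_register_physical_memory(0x00000000, 8 * 1024 * 1024,
                                    ram_offset | IO_MEM_RAM);

   MMIO regions are mapped the same way, passing the handle returned by
   cpu_register_io_memory() as phys_offset (see the sketch after that
   function).  Because the IO_MEM_* bits live in the low bits of
   phys_offset, a sub-page-sized or unaligned mapping automatically goes
   through the subpage machinery declared above. */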
2129

    
2130
/* XXX: temporary until new memory mapping API */
2131
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2132
{
2133
    PhysPageDesc *p;
2134

    
2135
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2136
    if (!p)
2137
        return IO_MEM_UNASSIGNED;
2138
    return p->phys_offset;
2139
}
2140

    
2141
/* XXX: better than nothing */
2142
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2143
{
2144
    ram_addr_t addr;
2145
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2146
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
2147
                size, phys_ram_size);
2148
        abort();
2149
    }
2150
    addr = phys_ram_alloc_offset;
2151
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2152
    return addr;
2153
}
2154

    
2155
void qemu_ram_free(ram_addr_t addr)
{
}
2158

    
2159
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2160
{
2161
#ifdef DEBUG_UNASSIGNED
2162
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2163
#endif
2164
#ifdef TARGET_SPARC
2165
    do_unassigned_access(addr, 0, 0, 0);
2166
#elif TARGET_CRIS
2167
    do_unassigned_access(addr, 0, 0, 0);
2168
#endif
2169
    return 0;
2170
}
2171

    
2172
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2173
{
2174
#ifdef DEBUG_UNASSIGNED
2175
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2176
#endif
2177
#ifdef TARGET_SPARC
2178
    do_unassigned_access(addr, 1, 0, 0);
2179
#elif TARGET_CRIS
2180
    do_unassigned_access(addr, 1, 0, 0);
2181
#endif
2182
}
2183

    
2184
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2185
    unassigned_mem_readb,
2186
    unassigned_mem_readb,
2187
    unassigned_mem_readb,
2188
};
2189

    
2190
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2191
    unassigned_mem_writeb,
2192
    unassigned_mem_writeb,
2193
    unassigned_mem_writeb,
2194
};
2195

    
2196
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2197
{
2198
    unsigned long ram_addr;
2199
    int dirty_flags;
2200
    ram_addr = addr - (unsigned long)phys_ram_base;
2201
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2202
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2203
#if !defined(CONFIG_USER_ONLY)
2204
        tb_invalidate_phys_page_fast(ram_addr, 1);
2205
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2206
#endif
2207
    }
2208
    stb_p((uint8_t *)(long)addr, val);
2209
#ifdef USE_KQEMU
2210
    if (cpu_single_env->kqemu_enabled &&
2211
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2212
        kqemu_modify_page(cpu_single_env, ram_addr);
2213
#endif
2214
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2215
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2216
    /* we remove the notdirty callback only if the code has been
2217
       flushed */
2218
    if (dirty_flags == 0xff)
2219
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2220
}
2221

    
2222
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2223
{
2224
    unsigned long ram_addr;
2225
    int dirty_flags;
2226
    ram_addr = addr - (unsigned long)phys_ram_base;
2227
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2228
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2229
#if !defined(CONFIG_USER_ONLY)
2230
        tb_invalidate_phys_page_fast(ram_addr, 2);
2231
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2232
#endif
2233
    }
2234
    stw_p((uint8_t *)(long)addr, val);
2235
#ifdef USE_KQEMU
2236
    if (cpu_single_env->kqemu_enabled &&
2237
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2238
        kqemu_modify_page(cpu_single_env, ram_addr);
2239
#endif
2240
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2241
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2242
    /* we remove the notdirty callback only if the code has been
2243
       flushed */
2244
    if (dirty_flags == 0xff)
2245
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2246
}
2247

    
2248
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2249
{
2250
    unsigned long ram_addr;
2251
    int dirty_flags;
2252
    ram_addr = addr - (unsigned long)phys_ram_base;
2253
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2254
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2255
#if !defined(CONFIG_USER_ONLY)
2256
        tb_invalidate_phys_page_fast(ram_addr, 4);
2257
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2258
#endif
2259
    }
2260
    stl_p((uint8_t *)(long)addr, val);
2261
#ifdef USE_KQEMU
2262
    if (cpu_single_env->kqemu_enabled &&
2263
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2264
        kqemu_modify_page(cpu_single_env, ram_addr);
2265
#endif
2266
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2267
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2268
    /* we remove the notdirty callback only if the code has been
2269
       flushed */
2270
    if (dirty_flags == 0xff)
2271
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2272
}
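
/* Worked trace (illustrative): the per-page dirty byte drives the three
   handlers above.  Suppose a page holds translated code, so its dirty byte
   lacks CODE_DIRTY_FLAG.  A guest store that reaches notdirty_mem_write*():

       1. sees CODE_DIRTY_FLAG clear, invalidates the TBs derived from the
          page and reloads the dirty byte (now with CODE_DIRTY_FLAG set);
       2. performs the store into host RAM;
       3. ORs in every dirty bit except CODE_DIRTY_FLAG and writes the byte
          back; once it reads 0xff, tlb_set_dirty() rewrites the TLB entry
          so later stores take the fast path again. */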
2273

    
2274
static CPUReadMemoryFunc *error_mem_read[3] = {
2275
    NULL, /* never used */
2276
    NULL, /* never used */
2277
    NULL, /* never used */
2278
};
2279

    
2280
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2281
    notdirty_mem_writeb,
2282
    notdirty_mem_writew,
2283
    notdirty_mem_writel,
2284
};
2285

    
2286
#if defined(CONFIG_SOFTMMU)
2287
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2288
   so these check for a hit then pass through to the normal out-of-line
2289
   phys routines.  */
2290
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2291
{
2292
    return ldub_phys(addr);
2293
}
2294

    
2295
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2296
{
2297
    return lduw_phys(addr);
2298
}
2299

    
2300
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2301
{
2302
    return ldl_phys(addr);
2303
}
2304

    
2305
/* Generate a debug exception if a watchpoint has been hit.
2306
   Returns the real physical address of the access.  addr will be a host
2307
   address in case of a RAM location.  */
2308
static target_ulong check_watchpoint(target_phys_addr_t addr)
2309
{
2310
    CPUState *env = cpu_single_env;
2311
    target_ulong watch;
2312
    target_ulong retaddr;
2313
    int i;
2314

    
2315
    retaddr = addr;
2316
    for (i = 0; i < env->nb_watchpoints; i++) {
2317
        watch = env->watchpoint[i].vaddr;
2318
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2319
            retaddr = addr - env->watchpoint[i].addend;
2320
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2321
                cpu_single_env->watchpoint_hit = i + 1;
2322
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2323
                break;
2324
            }
2325
        }
2326
    }
2327
    return retaddr;
2328
}
2329

    
2330
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2331
                             uint32_t val)
2332
{
2333
    addr = check_watchpoint(addr);
2334
    stb_phys(addr, val);
2335
}
2336

    
2337
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2338
                             uint32_t val)
2339
{
2340
    addr = check_watchpoint(addr);
2341
    stw_phys(addr, val);
2342
}
2343

    
2344
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2345
                             uint32_t val)
2346
{
2347
    addr = check_watchpoint(addr);
2348
    stl_phys(addr, val);
2349
}
2350

    
2351
static CPUReadMemoryFunc *watch_mem_read[3] = {
2352
    watch_mem_readb,
2353
    watch_mem_readw,
2354
    watch_mem_readl,
2355
};
2356

    
2357
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2358
    watch_mem_writeb,
2359
    watch_mem_writew,
2360
    watch_mem_writel,
2361
};
2362
#endif
2363

    
2364
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2365
                                 unsigned int len)
2366
{
2367
    uint32_t ret;
2368
    unsigned int idx;
2369

    
2370
    idx = SUBPAGE_IDX(addr - mmio->base);
2371
#if defined(DEBUG_SUBPAGE)
2372
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2373
           mmio, len, addr, idx);
2374
#endif
2375
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2376

    
2377
    return ret;
2378
}
2379

    
2380
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2381
                              uint32_t value, unsigned int len)
2382
{
2383
    unsigned int idx;
2384

    
2385
    idx = SUBPAGE_IDX(addr - mmio->base);
2386
#if defined(DEBUG_SUBPAGE)
2387
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2388
           mmio, len, addr, idx, value);
2389
#endif
2390
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2391
}
2392

    
2393
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2394
{
2395
#if defined(DEBUG_SUBPAGE)
2396
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2397
#endif
2398

    
2399
    return subpage_readlen(opaque, addr, 0);
2400
}
2401

    
2402
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2403
                            uint32_t value)
2404
{
2405
#if defined(DEBUG_SUBPAGE)
2406
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2407
#endif
2408
    subpage_writelen(opaque, addr, value, 0);
2409
}
2410

    
2411
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2412
{
2413
#if defined(DEBUG_SUBPAGE)
2414
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2415
#endif
2416

    
2417
    return subpage_readlen(opaque, addr, 1);
2418
}
2419

    
2420
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2421
                            uint32_t value)
2422
{
2423
#if defined(DEBUG_SUBPAGE)
2424
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2425
#endif
2426
    subpage_writelen(opaque, addr, value, 1);
2427
}
2428

    
2429
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2430
{
2431
#if defined(DEBUG_SUBPAGE)
2432
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2433
#endif
2434

    
2435
    return subpage_readlen(opaque, addr, 2);
2436
}
2437

    
2438
static void subpage_writel (void *opaque,
2439
                         target_phys_addr_t addr, uint32_t value)
2440
{
2441
#if defined(DEBUG_SUBPAGE)
2442
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2443
#endif
2444
    subpage_writelen(opaque, addr, value, 2);
2445
}
2446

    
2447
static CPUReadMemoryFunc *subpage_read[] = {
2448
    &subpage_readb,
2449
    &subpage_readw,
2450
    &subpage_readl,
2451
};
2452

    
2453
static CPUWriteMemoryFunc *subpage_write[] = {
2454
    &subpage_writeb,
2455
    &subpage_writew,
2456
    &subpage_writel,
2457
};
2458

    
2459
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2460
                             ram_addr_t memory)
2461
{
2462
    int idx, eidx;
2463
    unsigned int i;
2464

    
2465
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2466
        return -1;
2467
    idx = SUBPAGE_IDX(start);
2468
    eidx = SUBPAGE_IDX(end);
2469
#if defined(DEBUG_SUBPAGE)
2470
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2471
           mmio, start, end, idx, eidx, memory);
2472
#endif
2473
    memory >>= IO_MEM_SHIFT;
2474
    for (; idx <= eidx; idx++) {
2475
        for (i = 0; i < 4; i++) {
2476
            if (io_mem_read[memory][i]) {
2477
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2478
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2479
            }
2480
            if (io_mem_write[memory][i]) {
2481
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2482
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2483
            }
2484
        }
2485
    }
2486

    
2487
    return 0;
2488
}
2489

    
2490
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2491
                           ram_addr_t orig_memory)
2492
{
2493
    subpage_t *mmio;
2494
    int subpage_memory;
2495

    
2496
    mmio = qemu_mallocz(sizeof(subpage_t));
2497
    if (mmio != NULL) {
2498
        mmio->base = base;
2499
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2500
#if defined(DEBUG_SUBPAGE)
2501
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2502
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2503
#endif
2504
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2505
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2506
    }
2507

    
2508
    return mmio;
2509
}
2510

    
2511
static void io_mem_init(void)
2512
{
2513
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2514
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2515
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2516
    io_mem_nb = 5;
2517

    
2518
#if defined(CONFIG_SOFTMMU)
2519
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2520
                                          watch_mem_write, NULL);
2521
#endif
2522
    /* alloc dirty bits array */
2523
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2524
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2525
}
2526

    
2527
/* mem_read and mem_write are arrays of functions containing the
   functions to access the byte (index 0), word (index 1) and dword
   (index 2) widths. Functions can be omitted with a NULL function pointer.
   The registered functions may be modified dynamically later.
   If io_index is positive, the corresponding io zone is
   modified. If it is zero or negative, a new io zone is allocated. The
   return value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
2535
int cpu_register_io_memory(int io_index,
2536
                           CPUReadMemoryFunc **mem_read,
2537
                           CPUWriteMemoryFunc **mem_write,
2538
                           void *opaque)
2539
{
2540
    int i, subwidth = 0;
2541

    
2542
    if (io_index <= 0) {
2543
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2544
            return -1;
2545
        io_index = io_mem_nb++;
2546
    } else {
2547
        if (io_index >= IO_MEM_NB_ENTRIES)
2548
            return -1;
2549
    }
2550

    
2551
    for(i = 0;i < 3; i++) {
2552
        if (!mem_read[i] || !mem_write[i])
2553
            subwidth = IO_MEM_SUBWIDTH;
2554
        io_mem_read[io_index][i] = mem_read[i];
2555
        io_mem_write[io_index][i] = mem_write[i];
2556
    }
2557
    io_mem_opaque[io_index] = opaque;
2558
    return (io_index << IO_MEM_SHIFT) | subwidth;
2559
}
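
/* Usage sketch (illustrative): registering a trivial device.  The mydev_*
   names and the MyDevState type are made up; only one register is modelled,
   so a single callback is reused for all three widths, just like
   unassigned_mem_read[] above.

       static uint32_t mydev_read(void *opaque, target_phys_addr_t addr)
       {
           MyDevState *s = opaque;
           return s->reg;
       }

       static void mydev_write(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
       {
           MyDevState *s = opaque;
           s->reg = val;
       }

       static CPUReadMemoryFunc *mydev_read_fn[3] = {
           mydev_read, mydev_read, mydev_read,
       };
       static CPUWriteMemoryFunc *mydev_write_fn[3] = {
           mydev_write, mydev_write, mydev_write,
       };

   and then, from the device's init code:

       int mmio = cpu_register_io_memory(0, mydev_read_fn, mydev_write_fn, s);
       cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, mmio);

   Leaving a width NULL would set IO_MEM_SUBWIDTH in the returned value and
   route accesses through the subpage machinery instead. */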
2560

    
2561
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2562
{
2563
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2564
}
2565

    
2566
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2567
{
2568
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2569
}
2570

    
2571
/* physical memory access (slow version, mainly for debug) */
2572
#if defined(CONFIG_USER_ONLY)
2573
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2574
                            int len, int is_write)
2575
{
2576
    int l, flags;
2577
    target_ulong page;
2578
    void * p;
2579

    
2580
    while (len > 0) {
2581
        page = addr & TARGET_PAGE_MASK;
2582
        l = (page + TARGET_PAGE_SIZE) - addr;
2583
        if (l > len)
2584
            l = len;
2585
        flags = page_get_flags(page);
2586
        if (!(flags & PAGE_VALID))
2587
            return;
2588
        if (is_write) {
2589
            if (!(flags & PAGE_WRITE))
2590
                return;
2591
            /* XXX: this code should not depend on lock_user */
2592
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2593
                /* FIXME - should this return an error rather than just fail? */
2594
                return;
2595
            memcpy(p, buf, l);
2596
            unlock_user(p, addr, l);
2597
        } else {
2598
            if (!(flags & PAGE_READ))
2599
                return;
2600
            /* XXX: this code should not depend on lock_user */
2601
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2602
                /* FIXME - should this return an error rather than just fail? */
2603
                return;
2604
            memcpy(buf, p, l);
2605
            unlock_user(p, addr, 0);
2606
        }
2607
        len -= l;
2608
        buf += l;
2609
        addr += l;
2610
    }
2611
}
2612

    
2613
#else
2614
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2615
                            int len, int is_write)
2616
{
2617
    int l, io_index;
2618
    uint8_t *ptr;
2619
    uint32_t val;
2620
    target_phys_addr_t page;
2621
    unsigned long pd;
2622
    PhysPageDesc *p;
2623

    
2624
    while (len > 0) {
2625
        page = addr & TARGET_PAGE_MASK;
2626
        l = (page + TARGET_PAGE_SIZE) - addr;
2627
        if (l > len)
2628
            l = len;
2629
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2630
        if (!p) {
2631
            pd = IO_MEM_UNASSIGNED;
2632
        } else {
2633
            pd = p->phys_offset;
2634
        }
2635

    
2636
        if (is_write) {
2637
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2638
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2639
                /* XXX: could force cpu_single_env to NULL to avoid
2640
                   potential bugs */
2641
                if (l >= 4 && ((addr & 3) == 0)) {
2642
                    /* 32 bit write access */
2643
                    val = ldl_p(buf);
2644
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2645
                    l = 4;
2646
                } else if (l >= 2 && ((addr & 1) == 0)) {
2647
                    /* 16 bit write access */
2648
                    val = lduw_p(buf);
2649
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2650
                    l = 2;
2651
                } else {
2652
                    /* 8 bit write access */
2653
                    val = ldub_p(buf);
2654
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2655
                    l = 1;
2656
                }
2657
            } else {
2658
                unsigned long addr1;
2659
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2660
                /* RAM case */
2661
                ptr = phys_ram_base + addr1;
2662
                memcpy(ptr, buf, l);
2663
                if (!cpu_physical_memory_is_dirty(addr1)) {
2664
                    /* invalidate code */
2665
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2666
                    /* set dirty bit */
2667
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2668
                        (0xff & ~CODE_DIRTY_FLAG);
2669
                }
2670
            }
2671
        } else {
2672
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2673
                !(pd & IO_MEM_ROMD)) {
2674
                /* I/O case */
2675
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2676
                if (l >= 4 && ((addr & 3) == 0)) {
2677
                    /* 32 bit read access */
2678
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2679
                    stl_p(buf, val);
2680
                    l = 4;
2681
                } else if (l >= 2 && ((addr & 1) == 0)) {
2682
                    /* 16 bit read access */
2683
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2684
                    stw_p(buf, val);
2685
                    l = 2;
2686
                } else {
2687
                    /* 8 bit read access */
2688
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2689
                    stb_p(buf, val);
2690
                    l = 1;
2691
                }
2692
            } else {
2693
                /* RAM case */
2694
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2695
                    (addr & ~TARGET_PAGE_MASK);
2696
                memcpy(buf, ptr, l);
2697
            }
2698
        }
2699
        len -= l;
2700
        buf += l;
2701
        addr += l;
2702
    }
2703
}
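
/* Usage sketch (illustrative): device emulation uses this as its DMA path.
   The descriptor layout and the desc_paddr/buf/len variables are
   hypothetical; cpu_physical_memory_read()/write() are the cpu-all.h
   wrappers around cpu_physical_memory_rw().

       uint32_t desc[4];
       cpu_physical_memory_read(desc_paddr, (uint8_t *)desc, sizeof(desc));
       cpu_physical_memory_write(desc[0], buf, desc[1]);

   Writes that land in RAM invalidate any translated code in the touched
   pages and set the dirty bits, exactly as done above. */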
2704

    
2705
/* used for ROM loading: can write in RAM and ROM */
2706
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2707
                                   const uint8_t *buf, int len)
2708
{
2709
    int l;
2710
    uint8_t *ptr;
2711
    target_phys_addr_t page;
2712
    unsigned long pd;
2713
    PhysPageDesc *p;
2714

    
2715
    while (len > 0) {
2716
        page = addr & TARGET_PAGE_MASK;
2717
        l = (page + TARGET_PAGE_SIZE) - addr;
2718
        if (l > len)
2719
            l = len;
2720
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2721
        if (!p) {
2722
            pd = IO_MEM_UNASSIGNED;
2723
        } else {
2724
            pd = p->phys_offset;
2725
        }
2726

    
2727
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2728
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2729
            !(pd & IO_MEM_ROMD)) {
2730
            /* do nothing */
2731
        } else {
2732
            unsigned long addr1;
2733
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2734
            /* ROM/RAM case */
2735
            ptr = phys_ram_base + addr1;
2736
            memcpy(ptr, buf, l);
2737
        }
2738
        len -= l;
2739
        buf += l;
2740
        addr += l;
2741
    }
2742
}
2743

    
2744

    
2745
/* warning: addr must be aligned */
2746
uint32_t ldl_phys(target_phys_addr_t addr)
2747
{
2748
    int io_index;
2749
    uint8_t *ptr;
2750
    uint32_t val;
2751
    unsigned long pd;
2752
    PhysPageDesc *p;
2753

    
2754
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2755
    if (!p) {
2756
        pd = IO_MEM_UNASSIGNED;
2757
    } else {
2758
        pd = p->phys_offset;
2759
    }
2760

    
2761
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2762
        !(pd & IO_MEM_ROMD)) {
2763
        /* I/O case */
2764
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2765
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2766
    } else {
2767
        /* RAM case */
2768
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2769
            (addr & ~TARGET_PAGE_MASK);
2770
        val = ldl_p(ptr);
2771
    }
2772
    return val;
2773
}
2774

    
2775
/* warning: addr must be aligned */
2776
uint64_t ldq_phys(target_phys_addr_t addr)
2777
{
2778
    int io_index;
2779
    uint8_t *ptr;
2780
    uint64_t val;
2781
    unsigned long pd;
2782
    PhysPageDesc *p;
2783

    
2784
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2785
    if (!p) {
2786
        pd = IO_MEM_UNASSIGNED;
2787
    } else {
2788
        pd = p->phys_offset;
2789
    }
2790

    
2791
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2792
        !(pd & IO_MEM_ROMD)) {
2793
        /* I/O case */
2794
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2795
#ifdef TARGET_WORDS_BIGENDIAN
2796
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2797
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2798
#else
2799
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2800
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2801
#endif
2802
    } else {
2803
        /* RAM case */
2804
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2805
            (addr & ~TARGET_PAGE_MASK);
2806
        val = ldq_p(ptr);
2807
    }
2808
    return val;
2809
}
2810

    
2811
/* XXX: optimize */
2812
uint32_t ldub_phys(target_phys_addr_t addr)
2813
{
2814
    uint8_t val;
2815
    cpu_physical_memory_read(addr, &val, 1);
2816
    return val;
2817
}
2818

    
2819
/* XXX: optimize */
2820
uint32_t lduw_phys(target_phys_addr_t addr)
2821
{
2822
    uint16_t val;
2823
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2824
    return tswap16(val);
2825
}
2826

    
2827
/* warning: addr must be aligned. The ram page is not marked as dirty
2828
   and the code inside is not invalidated. It is useful if the dirty
2829
   bits are used to track modified PTEs */
2830
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2831
{
2832
    int io_index;
2833
    uint8_t *ptr;
2834
    unsigned long pd;
2835
    PhysPageDesc *p;
2836

    
2837
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2838
    if (!p) {
2839
        pd = IO_MEM_UNASSIGNED;
2840
    } else {
2841
        pd = p->phys_offset;
2842
    }
2843

    
2844
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2845
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2846
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2847
    } else {
2848
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2849
            (addr & ~TARGET_PAGE_MASK);
2850
        stl_p(ptr, val);
2851
    }
2852
}
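
/* Usage sketch (illustrative): a target MMU helper setting an accessed bit
   in a guest page table entry.  PTE_ACCESSED is a made-up constant; the
   point is that the emulator's own bookkeeping store must not mark the page
   dirty, so the dirty bits keep tracking only guest-initiated PTE writes.

       uint32_t pte = ldl_phys(pte_addr);
       if (!(pte & PTE_ACCESSED)) {
           pte |= PTE_ACCESSED;
           stl_phys_notdirty(pte_addr, pte);
       }
*/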
2853

    
2854
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2855
{
2856
    int io_index;
2857
    uint8_t *ptr;
2858
    unsigned long pd;
2859
    PhysPageDesc *p;
2860

    
2861
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2862
    if (!p) {
2863
        pd = IO_MEM_UNASSIGNED;
2864
    } else {
2865
        pd = p->phys_offset;
2866
    }
2867

    
2868
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2869
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2870
#ifdef TARGET_WORDS_BIGENDIAN
2871
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2872
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2873
#else
2874
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2875
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2876
#endif
2877
    } else {
2878
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2879
            (addr & ~TARGET_PAGE_MASK);
2880
        stq_p(ptr, val);
2881
    }
2882
}
2883

    
2884
/* warning: addr must be aligned */
2885
void stl_phys(target_phys_addr_t addr, uint32_t val)
2886
{
2887
    int io_index;
2888
    uint8_t *ptr;
2889
    unsigned long pd;
2890
    PhysPageDesc *p;
2891

    
2892
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2893
    if (!p) {
2894
        pd = IO_MEM_UNASSIGNED;
2895
    } else {
2896
        pd = p->phys_offset;
2897
    }
2898

    
2899
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2900
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2901
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2902
    } else {
2903
        unsigned long addr1;
2904
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2905
        /* RAM case */
2906
        ptr = phys_ram_base + addr1;
2907
        stl_p(ptr, val);
2908
        if (!cpu_physical_memory_is_dirty(addr1)) {
2909
            /* invalidate code */
2910
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2911
            /* set dirty bit */
2912
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2913
                (0xff & ~CODE_DIRTY_FLAG);
2914
        }
2915
    }
2916
}
2917

    
2918
/* XXX: optimize */
2919
void stb_phys(target_phys_addr_t addr, uint32_t val)
2920
{
2921
    uint8_t v = val;
2922
    cpu_physical_memory_write(addr, &v, 1);
2923
}
2924

    
2925
/* XXX: optimize */
2926
void stw_phys(target_phys_addr_t addr, uint32_t val)
2927
{
2928
    uint16_t v = tswap16(val);
2929
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2930
}
2931

    
2932
/* XXX: optimize */
2933
void stq_phys(target_phys_addr_t addr, uint64_t val)
2934
{
2935
    val = tswap64(val);
2936
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2937
}
2938

    
2939
#endif
2940

    
2941
/* virtual memory access for debug */
2942
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2943
                        uint8_t *buf, int len, int is_write)
2944
{
2945
    int l;
2946
    target_phys_addr_t phys_addr;
2947
    target_ulong page;
2948

    
2949
    while (len > 0) {
2950
        page = addr & TARGET_PAGE_MASK;
2951
        phys_addr = cpu_get_phys_page_debug(env, page);
2952
        /* if no physical page mapped, return an error */
2953
        if (phys_addr == -1)
2954
            return -1;
2955
        l = (page + TARGET_PAGE_SIZE) - addr;
2956
        if (l > len)
2957
            l = len;
2958
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2959
                               buf, l, is_write);
2960
        len -= l;
2961
        buf += l;
2962
        addr += l;
2963
    }
2964
    return 0;
2965
}
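
/* Usage sketch (illustrative): the gdb stub reads guest virtual memory this
   way; the vaddr, len and mem_buf variables are hypothetical.

       if (cpu_memory_rw_debug(env, vaddr, mem_buf, len, 0) != 0)
           return -1;

   Unlike the physical-memory loads above, this resolves each page through
   cpu_get_phys_page_debug(), so it works on addresses the TLB has never
   seen and fails cleanly when the guest MMU has no mapping. */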
2966

    
2967
void dump_exec_info(FILE *f,
2968
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2969
{
2970
    int i, target_code_size, max_target_code_size;
2971
    int direct_jmp_count, direct_jmp2_count, cross_page;
2972
    TranslationBlock *tb;
2973

    
2974
    target_code_size = 0;
2975
    max_target_code_size = 0;
2976
    cross_page = 0;
2977
    direct_jmp_count = 0;
2978
    direct_jmp2_count = 0;
2979
    for(i = 0; i < nb_tbs; i++) {
2980
        tb = &tbs[i];
2981
        target_code_size += tb->size;
2982
        if (tb->size > max_target_code_size)
2983
            max_target_code_size = tb->size;
2984
        if (tb->page_addr[1] != -1)
2985
            cross_page++;
2986
        if (tb->tb_next_offset[0] != 0xffff) {
2987
            direct_jmp_count++;
2988
            if (tb->tb_next_offset[1] != 0xffff) {
2989
                direct_jmp2_count++;
2990
            }
2991
        }
2992
    }
2993
    /* XXX: avoid using doubles ? */
2994
    cpu_fprintf(f, "Translation buffer state:\n");
2995
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2996
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2997
                nb_tbs ? target_code_size / nb_tbs : 0,
2998
                max_target_code_size);
2999
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3000
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3001
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3002
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3003
            cross_page,
3004
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3005
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3006
                direct_jmp_count,
3007
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3008
                direct_jmp2_count,
3009
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3010
    cpu_fprintf(f, "\nStatistics:\n");
3011
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3012
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3013
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3014
    tcg_dump_info(f, cpu_fprintf);
3015
}
3016

    
3017
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif