/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end;

    start = (unsigned long)addr;
    start &= ~(qemu_real_host_page_size - 1);

    end = (unsigned long)addr + size;
    end += qemu_real_host_page_size - 1;
    end &= ~(qemu_real_host_page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    map_exec(code_gen_buffer, sizeof(code_gen_buffer));
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
408

    
409
#ifdef DEBUG_TB_CHECK
410

    
411
static void tb_invalidate_check(target_ulong address)
412
{
413
    TranslationBlock *tb;
414
    int i;
415
    address &= TARGET_PAGE_MASK;
416
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
417
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
418
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
419
                  address >= tb->pc + tb->size)) {
420
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
421
                       address, (long)tb->pc, tb->size);
422
            }
423
        }
424
    }
425
}
426

    
427
/* verify that all the pages have correct rights for code */
428
static void tb_page_check(void)
429
{
430
    TranslationBlock *tb;
431
    int i, flags1, flags2;
432

    
433
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
434
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
435
            flags1 = page_get_flags(tb->pc);
436
            flags2 = page_get_flags(tb->pc + tb->size - 1);
437
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
438
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
439
                       (long)tb->pc, tb->size, flags1, flags2);
440
            }
441
        }
442
    }
443
}
444

    
445
void tb_jmp_check(TranslationBlock *tb)
446
{
447
    TranslationBlock *tb1;
448
    unsigned int n1;
449

    
450
    /* suppress any remaining jumps to this TB */
451
    tb1 = tb->jmp_first;
452
    for(;;) {
453
        n1 = (long)tb1 & 3;
454
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
455
        if (n1 == 2)
456
            break;
457
        tb1 = tb1->jmp_next[n1];
458
    }
459
    /* check end of list */
460
    if (tb1 != tb) {
461
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
462
    }
463
}
464

    
465
#endif
466

    
467
/* invalidate one TB */
468
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
469
                             int next_offset)
470
{
471
    TranslationBlock *tb1;
472
    for(;;) {
473
        tb1 = *ptb;
474
        if (tb1 == tb) {
475
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
476
            break;
477
        }
478
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
479
    }
480
}
481

    
482
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
483
{
484
    TranslationBlock *tb1;
485
    unsigned int n1;
486

    
487
    for(;;) {
488
        tb1 = *ptb;
489
        n1 = (long)tb1 & 3;
490
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
491
        if (tb1 == tb) {
492
            *ptb = tb1->page_next[n1];
493
            break;
494
        }
495
        ptb = &tb1->page_next[n1];
496
    }
497
}
498

    
499
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
500
{
501
    TranslationBlock *tb1, **ptb;
502
    unsigned int n1;
503

    
504
    ptb = &tb->jmp_next[n];
505
    tb1 = *ptb;
506
    if (tb1) {
507
        /* find tb(n) in circular list */
508
        for(;;) {
509
            tb1 = *ptb;
510
            n1 = (long)tb1 & 3;
511
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
512
            if (n1 == n && tb1 == tb)
513
                break;
514
            if (n1 == 2) {
515
                ptb = &tb1->jmp_first;
516
            } else {
517
                ptb = &tb1->jmp_next[n1];
518
            }
519
        }
520
        /* now we can suppress tb(n) from the list */
521
        *ptb = tb->jmp_next[n];
522

    
523
        tb->jmp_next[n] = NULL;
524
    }
525
}
526

    
527
/* reset the jump entry 'n' of a TB so that it is not chained to
528
   another TB */
529
static inline void tb_reset_jump(TranslationBlock *tb, int n)
530
{
531
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
532
}
533

    
534
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
535
{
536
    CPUState *env;
537
    PageDesc *p;
538
    unsigned int h, n1;
539
    target_phys_addr_t phys_pc;
540
    TranslationBlock *tb1, *tb2;
541

    
542
    /* remove the TB from the hash list */
543
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
544
    h = tb_phys_hash_func(phys_pc);
545
    tb_remove(&tb_phys_hash[h], tb,
546
              offsetof(TranslationBlock, phys_hash_next));
547

    
548
    /* remove the TB from the page list */
549
    if (tb->page_addr[0] != page_addr) {
550
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
551
        tb_page_remove(&p->first_tb, tb);
552
        invalidate_page_bitmap(p);
553
    }
554
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
555
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
556
        tb_page_remove(&p->first_tb, tb);
557
        invalidate_page_bitmap(p);
558
    }
559

    
560
    tb_invalidated_flag = 1;
561

    
562
    /* remove the TB from the hash list */
563
    h = tb_jmp_cache_hash_func(tb->pc);
564
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
565
        if (env->tb_jmp_cache[h] == tb)
566
            env->tb_jmp_cache[h] = NULL;
567
    }
568

    
569
    /* suppress this TB from the two jump lists */
570
    tb_jmp_remove(tb, 0);
571
    tb_jmp_remove(tb, 1);
572

    
573
    /* suppress any remaining jumps to this TB */
574
    tb1 = tb->jmp_first;
575
    for(;;) {
576
        n1 = (long)tb1 & 3;
577
        if (n1 == 2)
578
            break;
579
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
580
        tb2 = tb1->jmp_next[n1];
581
        tb_reset_jump(tb1, n1);
582
        tb1->jmp_next[n1] = NULL;
583
        tb1 = tb2;
584
    }
585
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
586

    
587
    tb_phys_invalidate_count++;
588
}
589

    
590
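/* set bits [start, start + len) in the byte-addressed bitmap 'tab' */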
static inline void set_bits(uint8_t *tab, int start, int len)
591
{
592
    int end, mask, end1;
593

    
594
    end = start + len;
595
    tab += start >> 3;
596
    mask = 0xff << (start & 7);
597
    if ((start & ~7) == (end & ~7)) {
598
        if (start < end) {
599
            mask &= ~(0xff << (end & 7));
600
            *tab |= mask;
601
        }
602
    } else {
603
        *tab++ |= mask;
604
        start = (start + 8) & ~7;
605
        end1 = end & ~7;
606
        while (start < end1) {
607
            *tab++ = 0xff;
608
            start += 8;
609
        }
610
        if (start < end) {
611
            mask = ~(0xff << (end & 7));
612
            *tab |= mask;
613
        }
614
    }
615
}
616

    
617
static void build_page_bitmap(PageDesc *p)
618
{
619
    int n, tb_start, tb_end;
620
    TranslationBlock *tb;
621

    
622
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
623
    if (!p->code_bitmap)
624
        return;
625
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
626

    
627
    tb = p->first_tb;
628
    while (tb != NULL) {
629
        n = (long)tb & 3;
630
        tb = (TranslationBlock *)((long)tb & ~3);
631
        /* NOTE: this is subtle as a TB may span two physical pages */
632
        if (n == 0) {
633
            /* NOTE: tb_end may be after the end of the page, but
634
               it is not a problem */
635
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
636
            tb_end = tb_start + tb->size;
637
            if (tb_end > TARGET_PAGE_SIZE)
638
                tb_end = TARGET_PAGE_SIZE;
639
        } else {
640
            tb_start = 0;
641
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
642
        }
643
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
644
        tb = tb->page_next[n];
645
    }
646
}
647

    
648
#ifdef TARGET_HAS_PRECISE_SMC
649

    
650
static void tb_gen_code(CPUState *env,
651
                        target_ulong pc, target_ulong cs_base, int flags,
652
                        int cflags)
653
{
654
    TranslationBlock *tb;
655
    uint8_t *tc_ptr;
656
    target_ulong phys_pc, phys_page2, virt_page2;
657
    int code_gen_size;
658

    
659
    phys_pc = get_phys_addr_code(env, pc);
660
    tb = tb_alloc(pc);
661
    if (!tb) {
662
        /* flush must be done */
663
        tb_flush(env);
664
        /* cannot fail at this point */
665
        tb = tb_alloc(pc);
666
    }
667
    tc_ptr = code_gen_ptr;
668
    tb->tc_ptr = tc_ptr;
669
    tb->cs_base = cs_base;
670
    tb->flags = flags;
671
    tb->cflags = cflags;
672
    cpu_gen_code(env, tb, &code_gen_size);
673
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
674

    
675
    /* check next page if needed */
676
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
677
    phys_page2 = -1;
678
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
679
        phys_page2 = get_phys_addr_code(env, virt_page2);
680
    }
681
    tb_link_phys(tb, phys_pc, phys_page2);
682
}
683
#endif
684

    
685
/* invalidate all TBs which intersect with the target physical page
686
   starting in range [start, end[. NOTE: start and end must refer to
687
   the same physical page. 'is_cpu_write_access' should be true if called
688
   from a real cpu write access: the virtual CPU will exit the current
689
   TB if code is modified inside this TB. */
690
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
691
                                   int is_cpu_write_access)
692
{
693
    int n, current_tb_modified, current_tb_not_found, current_flags;
694
    CPUState *env = cpu_single_env;
695
    PageDesc *p;
696
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
697
    target_ulong tb_start, tb_end;
698
    target_ulong current_pc, current_cs_base;
699

    
700
    p = page_find(start >> TARGET_PAGE_BITS);
701
    if (!p)
702
        return;
703
    if (!p->code_bitmap &&
704
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
705
        is_cpu_write_access) {
706
        /* build code bitmap */
707
        build_page_bitmap(p);
708
    }
709

    
710
    /* we remove all the TBs in the range [start, end[ */
711
    /* XXX: see if in some cases it could be faster to invalidate all the code */
712
    current_tb_not_found = is_cpu_write_access;
713
    current_tb_modified = 0;
714
    current_tb = NULL; /* avoid warning */
715
    current_pc = 0; /* avoid warning */
716
    current_cs_base = 0; /* avoid warning */
717
    current_flags = 0; /* avoid warning */
718
    tb = p->first_tb;
719
    while (tb != NULL) {
720
        n = (long)tb & 3;
721
        tb = (TranslationBlock *)((long)tb & ~3);
722
        tb_next = tb->page_next[n];
723
        /* NOTE: this is subtle as a TB may span two physical pages */
724
        if (n == 0) {
725
            /* NOTE: tb_end may be after the end of the page, but
726
               it is not a problem */
727
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
728
            tb_end = tb_start + tb->size;
729
        } else {
730
            tb_start = tb->page_addr[1];
731
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
732
        }
733
        if (!(tb_end <= start || tb_start >= end)) {
734
#ifdef TARGET_HAS_PRECISE_SMC
735
            if (current_tb_not_found) {
736
                current_tb_not_found = 0;
737
                current_tb = NULL;
738
                if (env->mem_write_pc) {
739
                    /* now we have a real cpu fault */
740
                    current_tb = tb_find_pc(env->mem_write_pc);
741
                }
742
            }
743
            if (current_tb == tb &&
744
                !(current_tb->cflags & CF_SINGLE_INSN)) {
745
                /* If we are modifying the current TB, we must stop
746
                its execution. We could be more precise by checking
747
                that the modification is after the current PC, but it
748
                would require a specialized function to partially
749
                restore the CPU state */
750

    
751
                current_tb_modified = 1;
752
                cpu_restore_state(current_tb, env,
753
                                  env->mem_write_pc, NULL);
754
#if defined(TARGET_I386)
755
                current_flags = env->hflags;
756
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
757
                current_cs_base = (target_ulong)env->segs[R_CS].base;
758
                current_pc = current_cs_base + env->eip;
759
#else
760
#error unsupported CPU
761
#endif
762
            }
763
#endif /* TARGET_HAS_PRECISE_SMC */
764
            /* we need to do that to handle the case where a signal
765
               occurs while doing tb_phys_invalidate() */
766
            saved_tb = NULL;
767
            if (env) {
768
                saved_tb = env->current_tb;
769
                env->current_tb = NULL;
770
            }
771
            tb_phys_invalidate(tb, -1);
772
            if (env) {
773
                env->current_tb = saved_tb;
774
                if (env->interrupt_request && env->current_tb)
775
                    cpu_interrupt(env, env->interrupt_request);
776
            }
777
        }
778
        tb = tb_next;
779
    }
780
#if !defined(CONFIG_USER_ONLY)
781
    /* if no code remaining, no need to continue to use slow writes */
782
    if (!p->first_tb) {
783
        invalidate_page_bitmap(p);
784
        if (is_cpu_write_access) {
785
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
786
        }
787
    }
788
#endif
789
#ifdef TARGET_HAS_PRECISE_SMC
790
    if (current_tb_modified) {
791
        /* we generate a block containing just the instruction
792
           modifying the memory. It will ensure that it cannot modify
793
           itself */
794
        env->current_tb = NULL;
795
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
796
                    CF_SINGLE_INSN);
797
        cpu_resume_from_signal(env, NULL);
798
    }
799
#endif
800
}
801

    
802
/* len must be <= 8 and start must be a multiple of len */
803
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
804
{
805
    PageDesc *p;
806
    int offset, b;
807
#if 0
808
    if (1) {
809
        if (loglevel) {
810
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
811
                   cpu_single_env->mem_write_vaddr, len,
812
                   cpu_single_env->eip,
813
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
814
        }
815
    }
816
#endif
817
    p = page_find(start >> TARGET_PAGE_BITS);
818
    if (!p)
819
        return;
820
    if (p->code_bitmap) {
821
        offset = start & ~TARGET_PAGE_MASK;
822
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
823
        if (b & ((1 << len) - 1))
824
            goto do_invalidate;
825
    } else {
826
    do_invalidate:
827
        tb_invalidate_phys_page_range(start, start + len, 1);
828
    }
829
}
830

    
831
#if !defined(CONFIG_SOFTMMU)
832
static void tb_invalidate_phys_page(target_phys_addr_t addr,
833
                                    unsigned long pc, void *puc)
834
{
835
    int n, current_flags, current_tb_modified;
836
    target_ulong current_pc, current_cs_base;
837
    PageDesc *p;
838
    TranslationBlock *tb, *current_tb;
839
#ifdef TARGET_HAS_PRECISE_SMC
840
    CPUState *env = cpu_single_env;
841
#endif
842

    
843
    addr &= TARGET_PAGE_MASK;
844
    p = page_find(addr >> TARGET_PAGE_BITS);
845
    if (!p)
846
        return;
847
    tb = p->first_tb;
848
    current_tb_modified = 0;
849
    current_tb = NULL;
850
    current_pc = 0; /* avoid warning */
851
    current_cs_base = 0; /* avoid warning */
852
    current_flags = 0; /* avoid warning */
853
#ifdef TARGET_HAS_PRECISE_SMC
854
    if (tb && pc != 0) {
855
        current_tb = tb_find_pc(pc);
856
    }
857
#endif
858
    while (tb != NULL) {
859
        n = (long)tb & 3;
860
        tb = (TranslationBlock *)((long)tb & ~3);
861
#ifdef TARGET_HAS_PRECISE_SMC
862
        if (current_tb == tb &&
863
            !(current_tb->cflags & CF_SINGLE_INSN)) {
864
                /* If we are modifying the current TB, we must stop
865
                   its execution. We could be more precise by checking
866
                   that the modification is after the current PC, but it
867
                   would require a specialized function to partially
868
                   restore the CPU state */
869

    
870
            current_tb_modified = 1;
871
            cpu_restore_state(current_tb, env, pc, puc);
872
#if defined(TARGET_I386)
873
            current_flags = env->hflags;
874
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
875
            current_cs_base = (target_ulong)env->segs[R_CS].base;
876
            current_pc = current_cs_base + env->eip;
877
#else
878
#error unsupported CPU
879
#endif
880
        }
881
#endif /* TARGET_HAS_PRECISE_SMC */
882
        tb_phys_invalidate(tb, addr);
883
        tb = tb->page_next[n];
884
    }
885
    p->first_tb = NULL;
886
#ifdef TARGET_HAS_PRECISE_SMC
887
    if (current_tb_modified) {
888
        /* we generate a block containing just the instruction
889
           modifying the memory. It will ensure that it cannot modify
890
           itself */
891
        env->current_tb = NULL;
892
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
893
                    CF_SINGLE_INSN);
894
        cpu_resume_from_signal(env, puc);
895
    }
896
#endif
897
}
898
#endif
899

    
900
/* add the tb in the target page and protect it if necessary */
901
static inline void tb_alloc_page(TranslationBlock *tb,
902
                                 unsigned int n, target_ulong page_addr)
903
{
904
    PageDesc *p;
905
    TranslationBlock *last_first_tb;
906

    
907
    tb->page_addr[n] = page_addr;
908
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
909
    tb->page_next[n] = p->first_tb;
910
    last_first_tb = p->first_tb;
911
    p->first_tb = (TranslationBlock *)((long)tb | n);
912
    invalidate_page_bitmap(p);
913

    
914
#if defined(TARGET_HAS_SMC) || 1
915

    
916
#if defined(CONFIG_USER_ONLY)
917
    if (p->flags & PAGE_WRITE) {
918
        target_ulong addr;
919
        PageDesc *p2;
920
        int prot;
921

    
922
        /* force the host page as non writable (writes will have a
923
           page fault + mprotect overhead) */
924
        page_addr &= qemu_host_page_mask;
925
        prot = 0;
926
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
927
            addr += TARGET_PAGE_SIZE) {
928

    
929
            p2 = page_find (addr >> TARGET_PAGE_BITS);
930
            if (!p2)
931
                continue;
932
            prot |= p2->flags;
933
            p2->flags &= ~PAGE_WRITE;
934
            page_get_flags(addr);
935
          }
936
        mprotect(g2h(page_addr), qemu_host_page_size,
937
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
938
#ifdef DEBUG_TB_INVALIDATE
939
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
940
               page_addr);
941
#endif
942
    }
943
#else
944
    /* if some code is already present, then the pages are already
945
       protected. So we handle the case where only the first TB is
946
       allocated in a physical page */
947
    if (!last_first_tb) {
948
        tlb_protect_code(page_addr);
949
    }
950
#endif
951

    
952
#endif /* TARGET_HAS_SMC */
953
}
954

    
955
/* Allocate a new translation block. Flush the translation buffer if
956
   too many translation blocks or too much generated code. */
957
TranslationBlock *tb_alloc(target_ulong pc)
958
{
959
    TranslationBlock *tb;
960

    
961
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
962
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
963
        return NULL;
964
    tb = &tbs[nb_tbs++];
965
    tb->pc = pc;
966
    tb->cflags = 0;
967
    return tb;
968
}
969

    
970
/* add a new TB and link it to the physical page tables. phys_page2 is
971
   (-1) to indicate that only one page contains the TB. */
972
void tb_link_phys(TranslationBlock *tb,
973
                  target_ulong phys_pc, target_ulong phys_page2)
974
{
975
    unsigned int h;
976
    TranslationBlock **ptb;
977

    
978
    /* add in the physical hash table */
979
    h = tb_phys_hash_func(phys_pc);
980
    ptb = &tb_phys_hash[h];
981
    tb->phys_hash_next = *ptb;
982
    *ptb = tb;
983

    
984
    /* add in the page list */
985
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
986
    if (phys_page2 != -1)
987
        tb_alloc_page(tb, 1, phys_page2);
988
    else
989
        tb->page_addr[1] = -1;
990

    
991
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
992
    tb->jmp_next[0] = NULL;
993
    tb->jmp_next[1] = NULL;
994

    
995
    /* init original jump addresses */
996
    if (tb->tb_next_offset[0] != 0xffff)
997
        tb_reset_jump(tb, 0);
998
    if (tb->tb_next_offset[1] != 0xffff)
999
        tb_reset_jump(tb, 1);
1000

    
1001
#ifdef DEBUG_TB_CHECK
1002
    tb_page_check();
1003
#endif
1004
}
1005

    
1006
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1007
   tb[1].tc_ptr. Return NULL if not found */
1008
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1009
{
1010
    int m_min, m_max, m;
1011
    unsigned long v;
1012
    TranslationBlock *tb;
1013

    
1014
    if (nb_tbs <= 0)
1015
        return NULL;
1016
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1017
        tc_ptr >= (unsigned long)code_gen_ptr)
1018
        return NULL;
1019
    /* binary search (cf Knuth) */
1020
    m_min = 0;
1021
    m_max = nb_tbs - 1;
1022
    while (m_min <= m_max) {
1023
        m = (m_min + m_max) >> 1;
1024
        tb = &tbs[m];
1025
        v = (unsigned long)tb->tc_ptr;
1026
        if (v == tc_ptr)
1027
            return tb;
1028
        else if (tc_ptr < v) {
1029
            m_max = m - 1;
1030
        } else {
1031
            m_min = m + 1;
1032
        }
1033
    }
1034
    return &tbs[m_max];
1035
}
1036

    
1037
static void tb_reset_jump_recursive(TranslationBlock *tb);
1038

    
1039
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1040
{
1041
    TranslationBlock *tb1, *tb_next, **ptb;
1042
    unsigned int n1;
1043

    
1044
    tb1 = tb->jmp_next[n];
1045
    if (tb1 != NULL) {
1046
        /* find head of list */
1047
        for(;;) {
1048
            n1 = (long)tb1 & 3;
1049
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1050
            if (n1 == 2)
1051
                break;
1052
            tb1 = tb1->jmp_next[n1];
1053
        }
1054
        /* we are now sure now that tb jumps to tb1 */
1055
        tb_next = tb1;
1056

    
1057
        /* remove tb from the jmp_first list */
1058
        ptb = &tb_next->jmp_first;
1059
        for(;;) {
1060
            tb1 = *ptb;
1061
            n1 = (long)tb1 & 3;
1062
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1063
            if (n1 == n && tb1 == tb)
1064
                break;
1065
            ptb = &tb1->jmp_next[n1];
1066
        }
1067
        *ptb = tb->jmp_next[n];
1068
        tb->jmp_next[n] = NULL;
1069

    
1070
        /* suppress the jump to next tb in generated code */
1071
        tb_reset_jump(tb, n);
1072

    
1073
        /* suppress jumps in the tb on which we could have jumped */
1074
        tb_reset_jump_recursive(tb_next);
1075
    }
1076
}
1077

    
1078
static void tb_reset_jump_recursive(TranslationBlock *tb)
1079
{
1080
    tb_reset_jump_recursive2(tb, 0);
1081
    tb_reset_jump_recursive2(tb, 1);
1082
}
1083

    
1084
#if defined(TARGET_HAS_ICE)
1085
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1086
{
1087
    target_phys_addr_t addr;
1088
    target_ulong pd;
1089
    ram_addr_t ram_addr;
1090
    PhysPageDesc *p;
1091

    
1092
    addr = cpu_get_phys_page_debug(env, pc);
1093
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1094
    if (!p) {
1095
        pd = IO_MEM_UNASSIGNED;
1096
    } else {
1097
        pd = p->phys_offset;
1098
    }
1099
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1100
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1101
}
1102
#endif
1103

    
1104
/* Add a watchpoint.  */
1105
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1106
{
1107
    int i;
1108

    
1109
    for (i = 0; i < env->nb_watchpoints; i++) {
1110
        if (addr == env->watchpoint[i].vaddr)
1111
            return 0;
1112
    }
1113
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1114
        return -1;
1115

    
1116
    i = env->nb_watchpoints++;
1117
    env->watchpoint[i].vaddr = addr;
1118
    tlb_flush_page(env, addr);
1119
    /* FIXME: This flush is needed because of the hack to make memory ops
1120
       terminate the TB.  It can be removed once the proper IO trap and
1121
       re-execute bits are in.  */
1122
    tb_flush(env);
1123
    return i;
1124
}
1125

    
1126
/* Remove a watchpoint.  */
1127
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1128
{
1129
    int i;
1130

    
1131
    for (i = 0; i < env->nb_watchpoints; i++) {
1132
        if (addr == env->watchpoint[i].vaddr) {
1133
            env->nb_watchpoints--;
1134
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1135
            tlb_flush_page(env, addr);
1136
            return 0;
1137
        }
1138
    }
1139
    return -1;
1140
}
1141

    
1142
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1143
   breakpoint is reached */
1144
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1145
{
1146
#if defined(TARGET_HAS_ICE)
1147
    int i;
1148

    
1149
    for(i = 0; i < env->nb_breakpoints; i++) {
1150
        if (env->breakpoints[i] == pc)
1151
            return 0;
1152
    }
1153

    
1154
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1155
        return -1;
1156
    env->breakpoints[env->nb_breakpoints++] = pc;
1157

    
1158
    breakpoint_invalidate(env, pc);
1159
    return 0;
1160
#else
1161
    return -1;
1162
#endif
1163
}
1164

    
1165
/* remove a breakpoint */
1166
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1167
{
1168
#if defined(TARGET_HAS_ICE)
1169
    int i;
1170
    for(i = 0; i < env->nb_breakpoints; i++) {
1171
        if (env->breakpoints[i] == pc)
1172
            goto found;
1173
    }
1174
    return -1;
1175
 found:
1176
    env->nb_breakpoints--;
1177
    if (i < env->nb_breakpoints)
1178
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1179

    
1180
    breakpoint_invalidate(env, pc);
1181
    return 0;
1182
#else
1183
    return -1;
1184
#endif
1185
}
1186

    
1187
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1188
   CPU loop after each instruction */
1189
void cpu_single_step(CPUState *env, int enabled)
1190
{
1191
#if defined(TARGET_HAS_ICE)
1192
    if (env->singlestep_enabled != enabled) {
1193
        env->singlestep_enabled = enabled;
1194
        /* must flush all the translated code to avoid inconsistancies */
1195
        /* XXX: only flush what is necessary */
1196
        tb_flush(env);
1197
    }
1198
#endif
1199
}
1200

    
1201
/* enable or disable low-level logging */
1202
void cpu_set_log(int log_flags)
1203
{
1204
    loglevel = log_flags;
1205
    if (loglevel && !logfile) {
1206
        logfile = fopen(logfilename, log_append ? "a" : "w");
1207
        if (!logfile) {
1208
            perror(logfilename);
1209
            _exit(1);
1210
        }
1211
#if !defined(CONFIG_SOFTMMU)
1212
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1213
        {
1214
            static uint8_t logfile_buf[4096];
1215
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1216
        }
1217
#else
1218
        setvbuf(logfile, NULL, _IOLBF, 0);
1219
#endif
1220
        log_append = 1;
1221
    }
1222
    if (!loglevel && logfile) {
1223
        fclose(logfile);
1224
        logfile = NULL;
1225
    }
1226
}
1227

    
1228
void cpu_set_log_filename(const char *filename)
1229
{
1230
    logfilename = strdup(filename);
1231
    if (logfile) {
1232
        fclose(logfile);
1233
        logfile = NULL;
1234
    }
1235
    cpu_set_log(loglevel);
1236
}
1237

    
1238
/* mask must never be zero, except for A20 change call */
1239
void cpu_interrupt(CPUState *env, int mask)
1240
{
1241
    TranslationBlock *tb;
1242
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1243

    
1244
    env->interrupt_request |= mask;
1245
    /* if the cpu is currently executing code, we must unlink it and
1246
       all the potentially executing TB */
1247
    tb = env->current_tb;
1248
    if (tb && !testandset(&interrupt_lock)) {
1249
        env->current_tb = NULL;
1250
        tb_reset_jump_recursive(tb);
1251
        resetlock(&interrupt_lock);
1252
    }
1253
}
1254

    
1255
void cpu_reset_interrupt(CPUState *env, int mask)
1256
{
1257
    env->interrupt_request &= ~mask;
1258
}
1259

    
1260
CPULogItem cpu_log_items[] = {
1261
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1262
      "show generated host assembly code for each compiled TB" },
1263
    { CPU_LOG_TB_IN_ASM, "in_asm",
1264
      "show target assembly code for each compiled TB" },
1265
    { CPU_LOG_TB_OP, "op",
1266
      "show micro ops for each compiled TB" },
1267
    { CPU_LOG_TB_OP_OPT, "op_opt",
1268
      "show micro ops "
1269
#ifdef TARGET_I386
1270
      "before eflags optimization and "
1271
#endif
1272
      "after liveness analysis" },
1273
    { CPU_LOG_INT, "int",
1274
      "show interrupts/exceptions in short format" },
1275
    { CPU_LOG_EXEC, "exec",
1276
      "show trace before each executed TB (lots of logs)" },
1277
    { CPU_LOG_TB_CPU, "cpu",
1278
      "show CPU state before block translation" },
1279
#ifdef TARGET_I386
1280
    { CPU_LOG_PCALL, "pcall",
1281
      "show protected mode far calls/returns/exceptions" },
1282
#endif
1283
#ifdef DEBUG_IOPORT
1284
    { CPU_LOG_IOPORT, "ioport",
1285
      "show all i/o ports accesses" },
1286
#endif
1287
    { 0, NULL, NULL },
1288
};
1289

    
1290
static int cmp1(const char *s1, int n, const char *s2)
1291
{
1292
    if (strlen(s2) != n)
1293
        return 0;
1294
    return memcmp(s1, s2, n) == 0;
1295
}
1296

    
1297
/* takes a comma separated list of log masks. Return 0 if error. */
1298
int cpu_str_to_log_mask(const char *str)
1299
{
1300
    CPULogItem *item;
1301
    int mask;
1302
    const char *p, *p1;
1303

    
1304
    p = str;
1305
    mask = 0;
1306
    for(;;) {
1307
        p1 = strchr(p, ',');
1308
        if (!p1)
1309
            p1 = p + strlen(p);
1310
        if(cmp1(p,p1-p,"all")) {
1311
                for(item = cpu_log_items; item->mask != 0; item++) {
1312
                        mask |= item->mask;
1313
                }
1314
        } else {
1315
        for(item = cpu_log_items; item->mask != 0; item++) {
1316
            if (cmp1(p, p1 - p, item->name))
1317
                goto found;
1318
        }
1319
        return 0;
1320
        }
1321
    found:
1322
        mask |= item->mask;
1323
        if (*p1 != ',')
1324
            break;
1325
        p = p1 + 1;
1326
    }
1327
    return mask;
1328
}
1329

    
1330
void cpu_abort(CPUState *env, const char *fmt, ...)
1331
{
1332
    va_list ap;
1333
    va_list ap2;
1334

    
1335
    va_start(ap, fmt);
1336
    va_copy(ap2, ap);
1337
    fprintf(stderr, "qemu: fatal: ");
1338
    vfprintf(stderr, fmt, ap);
1339
    fprintf(stderr, "\n");
1340
#ifdef TARGET_I386
1341
    if(env->intercept & INTERCEPT_SVM_MASK) {
1342
        /* most probably the virtual machine should not
1343
           be shut down but rather caught by the VMM */
1344
        vmexit(SVM_EXIT_SHUTDOWN, 0);
1345
    }
1346
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1347
#else
1348
    cpu_dump_state(env, stderr, fprintf, 0);
1349
#endif
1350
    if (logfile) {
1351
        fprintf(logfile, "qemu: fatal: ");
1352
        vfprintf(logfile, fmt, ap2);
1353
        fprintf(logfile, "\n");
1354
#ifdef TARGET_I386
1355
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1356
#else
1357
        cpu_dump_state(env, logfile, fprintf, 0);
1358
#endif
1359
        fflush(logfile);
1360
        fclose(logfile);
1361
    }
1362
    va_end(ap2);
1363
    va_end(ap);
1364
    abort();
1365
}
1366

    
1367
CPUState *cpu_copy(CPUState *env)
1368
{
1369
    CPUState *new_env = cpu_init(env->cpu_model_str);
1370
    /* preserve chaining and index */
1371
    CPUState *next_cpu = new_env->next_cpu;
1372
    int cpu_index = new_env->cpu_index;
1373
    memcpy(new_env, env, sizeof(CPUState));
1374
    new_env->next_cpu = next_cpu;
1375
    new_env->cpu_index = cpu_index;
1376
    return new_env;
1377
}
1378

    
1379
#if !defined(CONFIG_USER_ONLY)
1380

    
1381
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1382
{
1383
    unsigned int i;
1384

    
1385
    /* Discard jump cache entries for any tb which might potentially
1386
       overlap the flushed page.  */
1387
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1388
    memset (&env->tb_jmp_cache[i], 0, 
1389
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1390

    
1391
    i = tb_jmp_cache_hash_page(addr);
1392
    memset (&env->tb_jmp_cache[i], 0, 
1393
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1394
}
1395

    
1396
/* NOTE: if flush_global is true, also flush global entries (not
1397
   implemented yet) */
1398
void tlb_flush(CPUState *env, int flush_global)
1399
{
1400
    int i;
1401

    
1402
#if defined(DEBUG_TLB)
1403
    printf("tlb_flush:\n");
1404
#endif
1405
    /* must reset current TB so that interrupts cannot modify the
1406
       links while we are modifying them */
1407
    env->current_tb = NULL;
1408

    
1409
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1410
        env->tlb_table[0][i].addr_read = -1;
1411
        env->tlb_table[0][i].addr_write = -1;
1412
        env->tlb_table[0][i].addr_code = -1;
1413
        env->tlb_table[1][i].addr_read = -1;
1414
        env->tlb_table[1][i].addr_write = -1;
1415
        env->tlb_table[1][i].addr_code = -1;
1416
#if (NB_MMU_MODES >= 3)
1417
        env->tlb_table[2][i].addr_read = -1;
1418
        env->tlb_table[2][i].addr_write = -1;
1419
        env->tlb_table[2][i].addr_code = -1;
1420
#if (NB_MMU_MODES == 4)
1421
        env->tlb_table[3][i].addr_read = -1;
1422
        env->tlb_table[3][i].addr_write = -1;
1423
        env->tlb_table[3][i].addr_code = -1;
1424
#endif
1425
#endif
1426
    }
1427

    
1428
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1429

    
1430
#if !defined(CONFIG_SOFTMMU)
1431
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1432
#endif
1433
#ifdef USE_KQEMU
1434
    if (env->kqemu_enabled) {
1435
        kqemu_flush(env, flush_global);
1436
    }
1437
#endif
1438
    tlb_flush_count++;
1439
}
1440

    
1441
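/* invalidate the TLB entry if it matches the given virtual page address */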
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1442
{
1443
    if (addr == (tlb_entry->addr_read &
1444
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1445
        addr == (tlb_entry->addr_write &
1446
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1447
        addr == (tlb_entry->addr_code &
1448
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1449
        tlb_entry->addr_read = -1;
1450
        tlb_entry->addr_write = -1;
1451
        tlb_entry->addr_code = -1;
1452
    }
1453
}
1454

    
1455
void tlb_flush_page(CPUState *env, target_ulong addr)
1456
{
1457
    int i;
1458

    
1459
#if defined(DEBUG_TLB)
1460
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1461
#endif
1462
    /* must reset current TB so that interrupts cannot modify the
1463
       links while we are modifying them */
1464
    env->current_tb = NULL;
1465

    
1466
    addr &= TARGET_PAGE_MASK;
1467
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1468
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1469
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1470
#if (NB_MMU_MODES >= 3)
1471
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1472
#if (NB_MMU_MODES == 4)
1473
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1474
#endif
1475
#endif
1476

    
1477
    tlb_flush_jmp_cache(env, addr);
1478

    
1479
#if !defined(CONFIG_SOFTMMU)
1480
    if (addr < MMAP_AREA_END)
1481
        munmap((void *)addr, TARGET_PAGE_SIZE);
1482
#endif
1483
#ifdef USE_KQEMU
1484
    if (env->kqemu_enabled) {
1485
        kqemu_flush_page(env, addr);
1486
    }
1487
#endif
1488
}
1489

    
1490
/* update the TLBs so that writes to code in the virtual page 'addr'
1491
   can be detected */
1492
static void tlb_protect_code(ram_addr_t ram_addr)
1493
{
1494
    cpu_physical_memory_reset_dirty(ram_addr,
1495
                                    ram_addr + TARGET_PAGE_SIZE,
1496
                                    CODE_DIRTY_FLAG);
1497
}
1498

    
1499
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1500
   tested for self modifying code */
1501
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1502
                                    target_ulong vaddr)
1503
{
1504
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1505
}
1506

    
1507
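/* if the entry maps RAM inside [start, start + length), force further
   writes through the notdirty handler */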
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1508
                                         unsigned long start, unsigned long length)
1509
{
1510
    unsigned long addr;
1511
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1512
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1513
        if ((addr - start) < length) {
1514
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1515
        }
1516
    }
1517
}
1518

    
1519
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1520
                                     int dirty_flags)
1521
{
1522
    CPUState *env;
1523
    unsigned long length, start1;
1524
    int i, mask, len;
1525
    uint8_t *p;
1526

    
1527
    start &= TARGET_PAGE_MASK;
1528
    end = TARGET_PAGE_ALIGN(end);
1529

    
1530
    length = end - start;
1531
    if (length == 0)
1532
        return;
1533
    len = length >> TARGET_PAGE_BITS;
1534
#ifdef USE_KQEMU
1535
    /* XXX: should not depend on cpu context */
1536
    env = first_cpu;
1537
    if (env->kqemu_enabled) {
1538
        ram_addr_t addr;
1539
        addr = start;
1540
        for(i = 0; i < len; i++) {
1541
            kqemu_set_notdirty(env, addr);
1542
            addr += TARGET_PAGE_SIZE;
1543
        }
1544
    }
1545
#endif
1546
    mask = ~dirty_flags;
1547
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1548
    for(i = 0; i < len; i++)
1549
        p[i] &= mask;
1550

    
1551
    /* we modify the TLB cache so that the dirty bit will be set again
1552
       when accessing the range */
1553
    start1 = start + (unsigned long)phys_ram_base;
1554
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1555
        for(i = 0; i < CPU_TLB_SIZE; i++)
1556
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1557
        for(i = 0; i < CPU_TLB_SIZE; i++)
1558
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1559
#if (NB_MMU_MODES >= 3)
1560
        for(i = 0; i < CPU_TLB_SIZE; i++)
1561
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1562
#if (NB_MMU_MODES == 4)
1563
        for(i = 0; i < CPU_TLB_SIZE; i++)
1564
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1565
#endif
1566
#endif
1567
    }
1568

    
1569
#if !defined(CONFIG_SOFTMMU)
1570
    /* XXX: this is expensive */
1571
    {
1572
        VirtPageDesc *p;
1573
        int j;
1574
        target_ulong addr;
1575

    
1576
        for(i = 0; i < L1_SIZE; i++) {
1577
            p = l1_virt_map[i];
1578
            if (p) {
1579
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1580
                for(j = 0; j < L2_SIZE; j++) {
1581
                    if (p->valid_tag == virt_valid_tag &&
1582
                        p->phys_addr >= start && p->phys_addr < end &&
1583
                        (p->prot & PROT_WRITE)) {
1584
                        if (addr < MMAP_AREA_END) {
1585
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
1586
                                     p->prot & ~PROT_WRITE);
1587
                        }
1588
                    }
1589
                    addr += TARGET_PAGE_SIZE;
1590
                    p++;
1591
                }
1592
            }
1593
        }
1594
    }
1595
#endif
1596
}
1597

    
1598
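/* if the RAM page behind this entry is no longer dirty, make writes trap
   again so the dirty flag can be updated */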
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1599
{
1600
    ram_addr_t ram_addr;
1601

    
1602
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1603
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1604
            tlb_entry->addend - (unsigned long)phys_ram_base;
1605
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1606
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1607
        }
1608
    }
1609
}
1610

    
1611
/* update the TLB according to the current state of the dirty bits */
1612
void cpu_tlb_update_dirty(CPUState *env)
1613
{
1614
    int i;
1615
    for(i = 0; i < CPU_TLB_SIZE; i++)
1616
        tlb_update_dirty(&env->tlb_table[0][i]);
1617
    for(i = 0; i < CPU_TLB_SIZE; i++)
1618
        tlb_update_dirty(&env->tlb_table[1][i]);
1619
#if (NB_MMU_MODES >= 3)
1620
    for(i = 0; i < CPU_TLB_SIZE; i++)
1621
        tlb_update_dirty(&env->tlb_table[2][i]);
1622
#if (NB_MMU_MODES == 4)
1623
    for(i = 0; i < CPU_TLB_SIZE; i++)
1624
        tlb_update_dirty(&env->tlb_table[3][i]);
1625
#endif
1626
#endif
1627
}
1628

    
1629
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1630
                                  unsigned long start)
1631
{
1632
    unsigned long addr;
1633
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1634
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1635
        if (addr == start) {
1636
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1637
        }
1638
    }
1639
}
1640

    
1641
/* update the TLB corresponding to virtual page vaddr and phys addr
1642
   addr so that it is no longer dirty */
1643
static inline void tlb_set_dirty(CPUState *env,
1644
                                 unsigned long addr, target_ulong vaddr)
1645
{
1646
    int i;
1647

    
1648
    addr &= TARGET_PAGE_MASK;
1649
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1650
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1651
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1652
#if (NB_MMU_MODES >= 3)
1653
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1654
#if (NB_MMU_MODES == 4)
1655
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1656
#endif
1657
#endif
1658
}
1659

    
1660
/* add a new TLB entry. At most one entry for a given virtual address
1661
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1662
   (can only happen in non SOFTMMU mode for I/O pages or pages
1663
   conflicting with the host address space). */
1664
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1665
                      target_phys_addr_t paddr, int prot,
1666
                      int mmu_idx, int is_softmmu)
1667
{
1668
    PhysPageDesc *p;
1669
    unsigned long pd;
1670
    unsigned int index;
1671
    target_ulong address;
1672
    target_phys_addr_t addend;
1673
    int ret;
1674
    CPUTLBEntry *te;
1675
    int i;
1676

    
1677
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1678
    if (!p) {
1679
        pd = IO_MEM_UNASSIGNED;
1680
    } else {
1681
        pd = p->phys_offset;
1682
    }
1683
#if defined(DEBUG_TLB)
1684
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1685
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1686
#endif
1687

    
1688
    ret = 0;
1689
#if !defined(CONFIG_SOFTMMU)
1690
    if (is_softmmu)
1691
#endif
1692
    {
1693
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1694
            /* IO memory case */
1695
            address = vaddr | pd;
1696
            addend = paddr;
1697
        } else {
1698
            /* standard memory */
1699
            address = vaddr;
1700
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1701
        }
1702

    
1703
        /* Make accesses to pages with watchpoints go via the
1704
           watchpoint trap routines.  */
1705
        for (i = 0; i < env->nb_watchpoints; i++) {
1706
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1707
                if (address & ~TARGET_PAGE_MASK) {
1708
                    env->watchpoint[i].addend = 0;
1709
                    address = vaddr | io_mem_watch;
1710
                } else {
1711
                    env->watchpoint[i].addend = pd - paddr +
1712
                        (unsigned long) phys_ram_base;
1713
                    /* TODO: Figure out how to make read watchpoints coexist
1714
                       with code.  */
1715
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1716
                }
1717
            }
1718
        }
1719

    
1720
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1721
        addend -= vaddr;
1722
        te = &env->tlb_table[mmu_idx][index];
1723
        te->addend = addend;
1724
        if (prot & PAGE_READ) {
1725
            te->addr_read = address;
1726
        } else {
1727
            te->addr_read = -1;
1728
        }
1729

    
1730
        if (te->addr_code != -1) {
1731
            tlb_flush_jmp_cache(env, te->addr_code);
1732
        }
1733
        if (prot & PAGE_EXEC) {
1734
            te->addr_code = address;
1735
        } else {
1736
            te->addr_code = -1;
1737
        }
1738
        if (prot & PAGE_WRITE) {
1739
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1740
                (pd & IO_MEM_ROMD)) {
1741
                /* write access calls the I/O callback */
1742
                te->addr_write = vaddr |
1743
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1744
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1745
                       !cpu_physical_memory_is_dirty(pd)) {
1746
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1747
            } else {
1748
                te->addr_write = address;
1749
            }
1750
        } else {
1751
            te->addr_write = -1;
1752
        }
1753
    }
1754
#if !defined(CONFIG_SOFTMMU)
1755
    else {
1756
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1757
            /* IO access: no mapping is done as it will be handled by the
1758
               soft MMU */
1759
            if (!(env->hflags & HF_SOFTMMU_MASK))
1760
                ret = 2;
1761
        } else {
1762
            void *map_addr;
1763

    
1764
            if (vaddr >= MMAP_AREA_END) {
1765
                ret = 2;
1766
            } else {
1767
                if (prot & PROT_WRITE) {
1768
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1769
#if defined(TARGET_HAS_SMC) || 1
1770
                        first_tb ||
1771
#endif
1772
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1773
                         !cpu_physical_memory_is_dirty(pd))) {
1774
                        /* ROM: we do as if code was inside */
1775
                        /* if code is present, we only map as read only and save the
1776
                           original mapping */
1777
                        VirtPageDesc *vp;
1778

    
1779
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1780
                        vp->phys_addr = pd;
1781
                        vp->prot = prot;
1782
                        vp->valid_tag = virt_valid_tag;
1783
                        prot &= ~PAGE_WRITE;
1784
                    }
1785
                }
1786
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1787
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1788
                if (map_addr == MAP_FAILED) {
1789
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1790
                              paddr, vaddr);
1791
                }
1792
            }
1793
        }
1794
    }
1795
#endif
1796
    return ret;
1797
}
1798

    
1799
/* called from signal handler: invalidate the code and unprotect the
1800
   page. Return TRUE if the fault was succesfully handled. */
1801
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1802
{
1803
#if !defined(CONFIG_SOFTMMU)
1804
    VirtPageDesc *vp;
1805

    
1806
#if defined(DEBUG_TLB)
1807
    printf("page_unprotect: addr=0x%08x\n", addr);
1808
#endif
1809
    addr &= TARGET_PAGE_MASK;
1810

    
1811
    /* if it is not mapped, no need to worry here */
1812
    if (addr >= MMAP_AREA_END)
1813
        return 0;
1814
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1815
    if (!vp)
1816
        return 0;
1817
    /* NOTE: in this case, validate_tag is _not_ tested as it
1818
       validates only the code TLB */
1819
    if (vp->valid_tag != virt_valid_tag)
1820
        return 0;
1821
    if (!(vp->prot & PAGE_WRITE))
1822
        return 0;
1823
#if defined(DEBUG_TLB)
1824
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1825
           addr, vp->phys_addr, vp->prot);
1826
#endif
1827
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1828
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1829
                  (unsigned long)addr, vp->prot);
1830
    /* set the dirty bit */
1831
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1832
    /* flush the code inside */
1833
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1834
    return 1;
1835
#else
1836
    return 0;
1837
#endif
1838
}
1839

    
1840
#else
1841

    
1842
void tlb_flush(CPUState *env, int flush_global)
1843
{
1844
}
1845

    
1846
void tlb_flush_page(CPUState *env, target_ulong addr)
1847
{
1848
}
1849

    
1850
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1851
                      target_phys_addr_t paddr, int prot,
1852
                      int mmu_idx, int is_softmmu)
1853
{
1854
    return 0;
1855
}
1856

    
1857
/* dump memory mappings */
1858
void page_dump(FILE *f)
1859
{
1860
    unsigned long start, end;
1861
    int i, j, prot, prot1;
1862
    PageDesc *p;
1863

    
1864
    fprintf(f, "%-8s %-8s %-8s %s\n",
1865
            "start", "end", "size", "prot");
1866
    start = -1;
1867
    end = -1;
1868
    prot = 0;
1869
    for(i = 0; i <= L1_SIZE; i++) {
1870
        if (i < L1_SIZE)
1871
            p = l1_map[i];
1872
        else
1873
            p = NULL;
1874
        for(j = 0;j < L2_SIZE; j++) {
1875
            if (!p)
1876
                prot1 = 0;
1877
            else
1878
                prot1 = p[j].flags;
1879
            if (prot1 != prot) {
1880
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1881
                if (start != -1) {
1882
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1883
                            start, end, end - start,
1884
                            prot & PAGE_READ ? 'r' : '-',
1885
                            prot & PAGE_WRITE ? 'w' : '-',
1886
                            prot & PAGE_EXEC ? 'x' : '-');
1887
                }
1888
                if (prot1 != 0)
1889
                    start = end;
1890
                else
1891
                    start = -1;
1892
                prot = prot1;
1893
            }
1894
            if (!p)
1895
                break;
1896
        }
1897
    }
1898
}
1899

    
1900
int page_get_flags(target_ulong address)
1901
{
1902
    PageDesc *p;
1903

    
1904
    p = page_find(address >> TARGET_PAGE_BITS);
1905
    if (!p)
1906
        return 0;
1907
    return p->flags;
1908
}
1909

    
1910
/* modify the flags of a page and invalidate the code if
1911
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
1912
   depending on PAGE_WRITE */
1913
void page_set_flags(target_ulong start, target_ulong end, int flags)
1914
{
1915
    PageDesc *p;
1916
    target_ulong addr;
1917

    
1918
    start = start & TARGET_PAGE_MASK;
1919
    end = TARGET_PAGE_ALIGN(end);
1920
    if (flags & PAGE_WRITE)
1921
        flags |= PAGE_WRITE_ORG;
1922
    spin_lock(&tb_lock);
1923
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1924
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1925
        /* if the write protection is set, then we invalidate the code
1926
           inside */
1927
        if (!(p->flags & PAGE_WRITE) &&
1928
            (flags & PAGE_WRITE) &&
1929
            p->first_tb) {
1930
            tb_invalidate_phys_page(addr, 0, NULL);
1931
        }
1932
        p->flags = flags;
1933
    }
1934
    spin_unlock(&tb_lock);
1935
}
1936

    
1937
int page_check_range(target_ulong start, target_ulong len, int flags)
1938
{
1939
    PageDesc *p;
1940
    target_ulong end;
1941
    target_ulong addr;
1942

    
1943
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
1944
    start = start & TARGET_PAGE_MASK;
1945

    
1946
    if( end < start )
1947
        /* we've wrapped around */
1948
        return -1;
1949
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1950
        p = page_find(addr >> TARGET_PAGE_BITS);
1951
        if( !p )
1952
            return -1;
1953
        if( !(p->flags & PAGE_VALID) )
1954
            return -1;
1955

    
1956
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
1957
            return -1;
1958
        if (flags & PAGE_WRITE) {
1959
            if (!(p->flags & PAGE_WRITE_ORG))
1960
                return -1;
1961
            /* unprotect the page if it was put read-only because it
1962
               contains translated code */
1963
            if (!(p->flags & PAGE_WRITE)) {
1964
                if (!page_unprotect(addr, 0, NULL))
1965
                    return -1;
1966
            }
1967
            return 0;
1968
        }
1969
    }
1970
    return 0;
1971
}
1972

    
1973
/* called from signal handler: invalidate the code and unprotect the
1974
   page. Return TRUE if the fault was succesfully handled. */
1975
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1976
{
1977
    unsigned int page_index, prot, pindex;
1978
    PageDesc *p, *p1;
1979
    target_ulong host_start, host_end, addr;
1980

    
1981
    host_start = address & qemu_host_page_mask;
1982
    page_index = host_start >> TARGET_PAGE_BITS;
1983
    p1 = page_find(page_index);
1984
    if (!p1)
1985
        return 0;
1986
    host_end = host_start + qemu_host_page_size;
1987
    p = p1;
1988
    prot = 0;
1989
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1990
        prot |= p->flags;
1991
        p++;
1992
    }
1993
    /* if the page was really writable, then we change its
1994
       protection back to writable */
1995
    if (prot & PAGE_WRITE_ORG) {
1996
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
1997
        if (!(p1[pindex].flags & PAGE_WRITE)) {
1998
            mprotect((void *)g2h(host_start), qemu_host_page_size,
1999
                     (prot & PAGE_BITS) | PAGE_WRITE);
2000
            p1[pindex].flags |= PAGE_WRITE;
2001
            /* and since the content will be modified, we must invalidate
2002
               the corresponding translated code. */
2003
            tb_invalidate_phys_page(address, pc, puc);
2004
#ifdef DEBUG_TB_CHECK
2005
            tb_invalidate_check(address);
2006
#endif
2007
            return 1;
2008
        }
2009
    }
2010
    return 0;
2011
}
2012

    
2013
static inline void tlb_set_dirty(CPUState *env,
2014
                                 unsigned long addr, target_ulong vaddr)
2015
{
2016
}
2017
#endif /* defined(CONFIG_USER_ONLY) */
2018

    
2019
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2020
                             ram_addr_t memory);
2021
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2022
                           ram_addr_t orig_memory);
2023
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2024
                      need_subpage)                                     \
2025
    do {                                                                \
2026
        if (addr > start_addr)                                          \
2027
            start_addr2 = 0;                                            \
2028
        else {                                                          \
2029
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2030
            if (start_addr2 > 0)                                        \
2031
                need_subpage = 1;                                       \
2032
        }                                                               \
2033
                                                                        \
2034
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2035
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2036
        else {                                                          \
2037
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2038
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2039
                need_subpage = 1;                                       \
2040
        }                                                               \
2041
    } while (0)
2042

    
2043
/* register physical memory. 'size' must be a multiple of the target
2044
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2045
   io memory page */
2046
void cpu_register_physical_memory(target_phys_addr_t start_addr,
2047
                                  ram_addr_t size,
2048
                                  ram_addr_t phys_offset)
2049
{
2050
    target_phys_addr_t addr, end_addr;
2051
    PhysPageDesc *p;
2052
    CPUState *env;
2053
    ram_addr_t orig_size = size;
2054
    void *subpage;
2055

    
2056
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2057
    end_addr = start_addr + (target_phys_addr_t)size;
2058
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2059
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2060
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2061
            ram_addr_t orig_memory = p->phys_offset;
2062
            target_phys_addr_t start_addr2, end_addr2;
2063
            int need_subpage = 0;
2064

    
2065
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2066
                          need_subpage);
2067
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2068
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2069
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2070
                                           &p->phys_offset, orig_memory);
2071
                } else {
2072
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2073
                                            >> IO_MEM_SHIFT];
2074
                }
2075
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2076
            } else {
2077
                p->phys_offset = phys_offset;
2078
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2079
                    (phys_offset & IO_MEM_ROMD))
2080
                    phys_offset += TARGET_PAGE_SIZE;
2081
            }
2082
        } else {
2083
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2084
            p->phys_offset = phys_offset;
2085
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2086
                (phys_offset & IO_MEM_ROMD))
2087
                phys_offset += TARGET_PAGE_SIZE;
2088
            else {
2089
                target_phys_addr_t start_addr2, end_addr2;
2090
                int need_subpage = 0;
2091

    
2092
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2093
                              end_addr2, need_subpage);
2094

    
2095
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2096
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2097
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2098
                    subpage_register(subpage, start_addr2, end_addr2,
2099
                                     phys_offset);
2100
                }
2101
            }
2102
        }
2103
    }
2104

    
2105
    /* since each CPU stores ram addresses in its TLB cache, we must
2106
       reset the modified entries */
2107
    /* XXX: slow ! */
2108
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2109
        tlb_flush(env, 1);
2110
    }
2111
}
2112

    
2113
/* XXX: temporary until new memory mapping API */
2114
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2115
{
2116
    PhysPageDesc *p;
2117

    
2118
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2119
    if (!p)
2120
        return IO_MEM_UNASSIGNED;
2121
    return p->phys_offset;
2122
}
2123

    
2124
/* XXX: better than nothing */
2125
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2126
{
2127
    ram_addr_t addr;
2128
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2129
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
2130
                size, phys_ram_size);
2131
        abort();
2132
    }
2133
    addr = phys_ram_alloc_offset;
2134
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2135
    return addr;
2136
}
2137

    
2138
void qemu_ram_free(ram_addr_t addr)
2139
{
2140
}
2141

    
2142
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2143
{
2144
#ifdef DEBUG_UNASSIGNED
2145
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2146
#endif
2147
#ifdef TARGET_SPARC
2148
    do_unassigned_access(addr, 0, 0, 0);
2149
#elif TARGET_CRIS
2150
    do_unassigned_access(addr, 0, 0, 0);
2151
#endif
2152
    return 0;
2153
}
2154

    
2155
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2156
{
2157
#ifdef DEBUG_UNASSIGNED
2158
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2159
#endif
2160
#ifdef TARGET_SPARC
2161
    do_unassigned_access(addr, 1, 0, 0);
2162
#elif TARGET_CRIS
2163
    do_unassigned_access(addr, 1, 0, 0);
2164
#endif
2165
}
2166

    
2167
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2168
    unassigned_mem_readb,
2169
    unassigned_mem_readb,
2170
    unassigned_mem_readb,
2171
};
2172

    
2173
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2174
    unassigned_mem_writeb,
2175
    unassigned_mem_writeb,
2176
    unassigned_mem_writeb,
2177
};
2178

    
2179
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2180
{
2181
    unsigned long ram_addr;
2182
    int dirty_flags;
2183
    ram_addr = addr - (unsigned long)phys_ram_base;
2184
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2185
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2186
#if !defined(CONFIG_USER_ONLY)
2187
        tb_invalidate_phys_page_fast(ram_addr, 1);
2188
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2189
#endif
2190
    }
2191
    stb_p((uint8_t *)(long)addr, val);
2192
#ifdef USE_KQEMU
2193
    if (cpu_single_env->kqemu_enabled &&
2194
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2195
        kqemu_modify_page(cpu_single_env, ram_addr);
2196
#endif
2197
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2198
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2199
    /* we remove the notdirty callback only if the code has been
2200
       flushed */
2201
    if (dirty_flags == 0xff)
2202
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2203
}
2204

    
2205
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2206
{
2207
    unsigned long ram_addr;
2208
    int dirty_flags;
2209
    ram_addr = addr - (unsigned long)phys_ram_base;
2210
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2211
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2212
#if !defined(CONFIG_USER_ONLY)
2213
        tb_invalidate_phys_page_fast(ram_addr, 2);
2214
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2215
#endif
2216
    }
2217
    stw_p((uint8_t *)(long)addr, val);
2218
#ifdef USE_KQEMU
2219
    if (cpu_single_env->kqemu_enabled &&
2220
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2221
        kqemu_modify_page(cpu_single_env, ram_addr);
2222
#endif
2223
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2224
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2225
    /* we remove the notdirty callback only if the code has been
2226
       flushed */
2227
    if (dirty_flags == 0xff)
2228
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2229
}
2230

    
2231
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2232
{
2233
    unsigned long ram_addr;
2234
    int dirty_flags;
2235
    ram_addr = addr - (unsigned long)phys_ram_base;
2236
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2237
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2238
#if !defined(CONFIG_USER_ONLY)
2239
        tb_invalidate_phys_page_fast(ram_addr, 4);
2240
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2241
#endif
2242
    }
2243
    stl_p((uint8_t *)(long)addr, val);
2244
#ifdef USE_KQEMU
2245
    if (cpu_single_env->kqemu_enabled &&
2246
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2247
        kqemu_modify_page(cpu_single_env, ram_addr);
2248
#endif
2249
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2250
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2251
    /* we remove the notdirty callback only if the code has been
2252
       flushed */
2253
    if (dirty_flags == 0xff)
2254
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2255
}
2256

    
2257
static CPUReadMemoryFunc *error_mem_read[3] = {
2258
    NULL, /* never used */
2259
    NULL, /* never used */
2260
    NULL, /* never used */
2261
};
2262

    
2263
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2264
    notdirty_mem_writeb,
2265
    notdirty_mem_writew,
2266
    notdirty_mem_writel,
2267
};
2268

    
2269
#if defined(CONFIG_SOFTMMU)
2270
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2271
   so these check for a hit then pass through to the normal out-of-line
2272
   phys routines.  */
2273
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2274
{
2275
    return ldub_phys(addr);
2276
}
2277

    
2278
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2279
{
2280
    return lduw_phys(addr);
2281
}
2282

    
2283
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2284
{
2285
    return ldl_phys(addr);
2286
}
2287

    
2288
/* Generate a debug exception if a watchpoint has been hit.
2289
   Returns the real physical address of the access.  addr will be a host
2290
   address in case of a RAM location.  */
2291
static target_ulong check_watchpoint(target_phys_addr_t addr)
2292
{
2293
    CPUState *env = cpu_single_env;
2294
    target_ulong watch;
2295
    target_ulong retaddr;
2296
    int i;
2297

    
2298
    retaddr = addr;
2299
    for (i = 0; i < env->nb_watchpoints; i++) {
2300
        watch = env->watchpoint[i].vaddr;
2301
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2302
            retaddr = addr - env->watchpoint[i].addend;
2303
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2304
                cpu_single_env->watchpoint_hit = i + 1;
2305
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2306
                break;
2307
            }
2308
        }
2309
    }
2310
    return retaddr;
2311
}
2312

    
2313
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2314
                             uint32_t val)
2315
{
2316
    addr = check_watchpoint(addr);
2317
    stb_phys(addr, val);
2318
}
2319

    
2320
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2321
                             uint32_t val)
2322
{
2323
    addr = check_watchpoint(addr);
2324
    stw_phys(addr, val);
2325
}
2326

    
2327
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2328
                             uint32_t val)
2329
{
2330
    addr = check_watchpoint(addr);
2331
    stl_phys(addr, val);
2332
}
2333

    
2334
static CPUReadMemoryFunc *watch_mem_read[3] = {
2335
    watch_mem_readb,
2336
    watch_mem_readw,
2337
    watch_mem_readl,
2338
};
2339

    
2340
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2341
    watch_mem_writeb,
2342
    watch_mem_writew,
2343
    watch_mem_writel,
2344
};
2345
#endif
2346

    
2347
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2348
                                 unsigned int len)
2349
{
2350
    uint32_t ret;
2351
    unsigned int idx;
2352

    
2353
    idx = SUBPAGE_IDX(addr - mmio->base);
2354
#if defined(DEBUG_SUBPAGE)
2355
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2356
           mmio, len, addr, idx);
2357
#endif
2358
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2359

    
2360
    return ret;
2361
}
2362

    
2363
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2364
                              uint32_t value, unsigned int len)
2365
{
2366
    unsigned int idx;
2367

    
2368
    idx = SUBPAGE_IDX(addr - mmio->base);
2369
#if defined(DEBUG_SUBPAGE)
2370
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2371
           mmio, len, addr, idx, value);
2372
#endif
2373
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2374
}
2375

    
2376
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2377
{
2378
#if defined(DEBUG_SUBPAGE)
2379
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2380
#endif
2381

    
2382
    return subpage_readlen(opaque, addr, 0);
2383
}
2384

    
2385
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2386
                            uint32_t value)
2387
{
2388
#if defined(DEBUG_SUBPAGE)
2389
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2390
#endif
2391
    subpage_writelen(opaque, addr, value, 0);
2392
}
2393

    
2394
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2395
{
2396
#if defined(DEBUG_SUBPAGE)
2397
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2398
#endif
2399

    
2400
    return subpage_readlen(opaque, addr, 1);
2401
}
2402

    
2403
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2404
                            uint32_t value)
2405
{
2406
#if defined(DEBUG_SUBPAGE)
2407
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2408
#endif
2409
    subpage_writelen(opaque, addr, value, 1);
2410
}
2411

    
2412
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2413
{
2414
#if defined(DEBUG_SUBPAGE)
2415
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2416
#endif
2417

    
2418
    return subpage_readlen(opaque, addr, 2);
2419
}
2420

    
2421
static void subpage_writel (void *opaque,
2422
                         target_phys_addr_t addr, uint32_t value)
2423
{
2424
#if defined(DEBUG_SUBPAGE)
2425
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2426
#endif
2427
    subpage_writelen(opaque, addr, value, 2);
2428
}
2429

    
2430
static CPUReadMemoryFunc *subpage_read[] = {
2431
    &subpage_readb,
2432
    &subpage_readw,
2433
    &subpage_readl,
2434
};
2435

    
2436
static CPUWriteMemoryFunc *subpage_write[] = {
2437
    &subpage_writeb,
2438
    &subpage_writew,
2439
    &subpage_writel,
2440
};
2441

    
2442
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2443
                             ram_addr_t memory)
2444
{
2445
    int idx, eidx;
2446
    unsigned int i;
2447

    
2448
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2449
        return -1;
2450
    idx = SUBPAGE_IDX(start);
2451
    eidx = SUBPAGE_IDX(end);
2452
#if defined(DEBUG_SUBPAGE)
2453
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2454
           mmio, start, end, idx, eidx, memory);
2455
#endif
2456
    memory >>= IO_MEM_SHIFT;
2457
    for (; idx <= eidx; idx++) {
2458
        for (i = 0; i < 4; i++) {
2459
            if (io_mem_read[memory][i]) {
2460
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2461
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2462
            }
2463
            if (io_mem_write[memory][i]) {
2464
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2465
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2466
            }
2467
        }
2468
    }
2469

    
2470
    return 0;
2471
}
2472

    
2473
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2474
                           ram_addr_t orig_memory)
2475
{
2476
    subpage_t *mmio;
2477
    int subpage_memory;
2478

    
2479
    mmio = qemu_mallocz(sizeof(subpage_t));
2480
    if (mmio != NULL) {
2481
        mmio->base = base;
2482
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2483
#if defined(DEBUG_SUBPAGE)
2484
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2485
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2486
#endif
2487
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2488
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2489
    }
2490

    
2491
    return mmio;
2492
}
2493

    
2494
static void io_mem_init(void)
2495
{
2496
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2497
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2498
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2499
    io_mem_nb = 5;
2500

    
2501
#if defined(CONFIG_SOFTMMU)
2502
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2503
                                          watch_mem_write, NULL);
2504
#endif
2505
    /* alloc dirty bits array */
2506
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2507
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2508
}
2509

    
2510
/* mem_read and mem_write are arrays of functions containing the
2511
   function to access byte (index 0), word (index 1) and dword (index
2512
   2). Functions can be omitted with a NULL function pointer. The
2513
   registered functions may be modified dynamically later.
2514
   If io_index is non zero, the corresponding io zone is
2515
   modified. If it is zero, a new io zone is allocated. The return
2516
   value can be used with cpu_register_physical_memory(). (-1) is
2517
   returned if error. */
2518
int cpu_register_io_memory(int io_index,
2519
                           CPUReadMemoryFunc **mem_read,
2520
                           CPUWriteMemoryFunc **mem_write,
2521
                           void *opaque)
2522
{
2523
    int i, subwidth = 0;
2524

    
2525
    if (io_index <= 0) {
2526
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2527
            return -1;
2528
        io_index = io_mem_nb++;
2529
    } else {
2530
        if (io_index >= IO_MEM_NB_ENTRIES)
2531
            return -1;
2532
    }
2533

    
2534
    for(i = 0;i < 3; i++) {
2535
        if (!mem_read[i] || !mem_write[i])
2536
            subwidth = IO_MEM_SUBWIDTH;
2537
        io_mem_read[io_index][i] = mem_read[i];
2538
        io_mem_write[io_index][i] = mem_write[i];
2539
    }
2540
    io_mem_opaque[io_index] = opaque;
2541
    return (io_index << IO_MEM_SHIFT) | subwidth;
2542
}
2543

    
2544
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2545
{
2546
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2547
}
2548

    
2549
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2550
{
2551
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2552
}
2553

    
2554
/* physical memory access (slow version, mainly for debug) */
2555
#if defined(CONFIG_USER_ONLY)
2556
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2557
                            int len, int is_write)
2558
{
2559
    int l, flags;
2560
    target_ulong page;
2561
    void * p;
2562

    
2563
    while (len > 0) {
2564
        page = addr & TARGET_PAGE_MASK;
2565
        l = (page + TARGET_PAGE_SIZE) - addr;
2566
        if (l > len)
2567
            l = len;
2568
        flags = page_get_flags(page);
2569
        if (!(flags & PAGE_VALID))
2570
            return;
2571
        if (is_write) {
2572
            if (!(flags & PAGE_WRITE))
2573
                return;
2574
            /* XXX: this code should not depend on lock_user */
2575
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2576
                /* FIXME - should this return an error rather than just fail? */
2577
                return;
2578
            memcpy(p, buf, l);
2579
            unlock_user(p, addr, l);
2580
        } else {
2581
            if (!(flags & PAGE_READ))
2582
                return;
2583
            /* XXX: this code should not depend on lock_user */
2584
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2585
                /* FIXME - should this return an error rather than just fail? */
2586
                return;
2587
            memcpy(buf, p, l);
2588
            unlock_user(p, addr, 0);
2589
        }
2590
        len -= l;
2591
        buf += l;
2592
        addr += l;
2593
    }
2594
}
2595

    
2596
#else
2597
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2598
                            int len, int is_write)
2599
{
2600
    int l, io_index;
2601
    uint8_t *ptr;
2602
    uint32_t val;
2603
    target_phys_addr_t page;
2604
    unsigned long pd;
2605
    PhysPageDesc *p;
2606

    
2607
    while (len > 0) {
2608
        page = addr & TARGET_PAGE_MASK;
2609
        l = (page + TARGET_PAGE_SIZE) - addr;
2610
        if (l > len)
2611
            l = len;
2612
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2613
        if (!p) {
2614
            pd = IO_MEM_UNASSIGNED;
2615
        } else {
2616
            pd = p->phys_offset;
2617
        }
2618

    
2619
        if (is_write) {
2620
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2621
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2622
                /* XXX: could force cpu_single_env to NULL to avoid
2623
                   potential bugs */
2624
                if (l >= 4 && ((addr & 3) == 0)) {
2625
                    /* 32 bit write access */
2626
                    val = ldl_p(buf);
2627
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2628
                    l = 4;
2629
                } else if (l >= 2 && ((addr & 1) == 0)) {
2630
                    /* 16 bit write access */
2631
                    val = lduw_p(buf);
2632
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2633
                    l = 2;
2634
                } else {
2635
                    /* 8 bit write access */
2636
                    val = ldub_p(buf);
2637
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2638
                    l = 1;
2639
                }
2640
            } else {
2641
                unsigned long addr1;
2642
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2643
                /* RAM case */
2644
                ptr = phys_ram_base + addr1;
2645
                memcpy(ptr, buf, l);
2646
                if (!cpu_physical_memory_is_dirty(addr1)) {
2647
                    /* invalidate code */
2648
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2649
                    /* set dirty bit */
2650
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2651
                        (0xff & ~CODE_DIRTY_FLAG);
2652
                }
2653
            }
2654
        } else {
2655
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2656
                !(pd & IO_MEM_ROMD)) {
2657
                /* I/O case */
2658
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2659
                if (l >= 4 && ((addr & 3) == 0)) {
2660
                    /* 32 bit read access */
2661
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2662
                    stl_p(buf, val);
2663
                    l = 4;
2664
                } else if (l >= 2 && ((addr & 1) == 0)) {
2665
                    /* 16 bit read access */
2666
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2667
                    stw_p(buf, val);
2668
                    l = 2;
2669
                } else {
2670
                    /* 8 bit read access */
2671
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2672
                    stb_p(buf, val);
2673
                    l = 1;
2674
                }
2675
            } else {
2676
                /* RAM case */
2677
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2678
                    (addr & ~TARGET_PAGE_MASK);
2679
                memcpy(buf, ptr, l);
2680
            }
2681
        }
2682
        len -= l;
2683
        buf += l;
2684
        addr += l;
2685
    }
2686
}
2687

    
2688
/* used for ROM loading : can write in RAM and ROM */
2689
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2690
                                   const uint8_t *buf, int len)
2691
{
2692
    int l;
2693
    uint8_t *ptr;
2694
    target_phys_addr_t page;
2695
    unsigned long pd;
2696
    PhysPageDesc *p;
2697

    
2698
    while (len > 0) {
2699
        page = addr & TARGET_PAGE_MASK;
2700
        l = (page + TARGET_PAGE_SIZE) - addr;
2701
        if (l > len)
2702
            l = len;
2703
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2704
        if (!p) {
2705
            pd = IO_MEM_UNASSIGNED;
2706
        } else {
2707
            pd = p->phys_offset;
2708
        }
2709

    
2710
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2711
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2712
            !(pd & IO_MEM_ROMD)) {
2713
            /* do nothing */
2714
        } else {
2715
            unsigned long addr1;
2716
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2717
            /* ROM/RAM case */
2718
            ptr = phys_ram_base + addr1;
2719
            memcpy(ptr, buf, l);
2720
        }
2721
        len -= l;
2722
        buf += l;
2723
        addr += l;
2724
    }
2725
}
2726

    
2727

    
2728
/* warning: addr must be aligned */
2729
uint32_t ldl_phys(target_phys_addr_t addr)
2730
{
2731
    int io_index;
2732
    uint8_t *ptr;
2733
    uint32_t val;
2734
    unsigned long pd;
2735
    PhysPageDesc *p;
2736

    
2737
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2738
    if (!p) {
2739
        pd = IO_MEM_UNASSIGNED;
2740
    } else {
2741
        pd = p->phys_offset;
2742
    }
2743

    
2744
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2745
        !(pd & IO_MEM_ROMD)) {
2746
        /* I/O case */
2747
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2748
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2749
    } else {
2750
        /* RAM case */
2751
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2752
            (addr & ~TARGET_PAGE_MASK);
2753
        val = ldl_p(ptr);
2754
    }
2755
    return val;
2756
}
2757

    
2758
/* warning: addr must be aligned */
2759
uint64_t ldq_phys(target_phys_addr_t addr)
2760
{
2761
    int io_index;
2762
    uint8_t *ptr;
2763
    uint64_t val;
2764
    unsigned long pd;
2765
    PhysPageDesc *p;
2766

    
2767
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2768
    if (!p) {
2769
        pd = IO_MEM_UNASSIGNED;
2770
    } else {
2771
        pd = p->phys_offset;
2772
    }
2773

    
2774
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2775
        !(pd & IO_MEM_ROMD)) {
2776
        /* I/O case */
2777
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2778
#ifdef TARGET_WORDS_BIGENDIAN
2779
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2780
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2781
#else
2782
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2783
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2784
#endif
2785
    } else {
2786
        /* RAM case */
2787
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2788
            (addr & ~TARGET_PAGE_MASK);
2789
        val = ldq_p(ptr);
2790
    }
2791
    return val;
2792
}
2793

    
2794
/* XXX: optimize */
2795
uint32_t ldub_phys(target_phys_addr_t addr)
2796
{
2797
    uint8_t val;
2798
    cpu_physical_memory_read(addr, &val, 1);
2799
    return val;
2800
}
2801

    
2802
/* XXX: optimize */
2803
uint32_t lduw_phys(target_phys_addr_t addr)
2804
{
2805
    uint16_t val;
2806
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2807
    return tswap16(val);
2808
}
2809

    
2810
/* warning: addr must be aligned. The ram page is not masked as dirty
2811
   and the code inside is not invalidated. It is useful if the dirty
2812
   bits are used to track modified PTEs */
2813
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2814
{
2815
    int io_index;
2816
    uint8_t *ptr;
2817
    unsigned long pd;
2818
    PhysPageDesc *p;
2819

    
2820
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2821
    if (!p) {
2822
        pd = IO_MEM_UNASSIGNED;
2823
    } else {
2824
        pd = p->phys_offset;
2825
    }
2826

    
2827
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2828
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2829
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2830
    } else {
2831
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2832
            (addr & ~TARGET_PAGE_MASK);
2833
        stl_p(ptr, val);
2834
    }
2835
}
2836

    
2837
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2838
{
2839
    int io_index;
2840
    uint8_t *ptr;
2841
    unsigned long pd;
2842
    PhysPageDesc *p;
2843

    
2844
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2845
    if (!p) {
2846
        pd = IO_MEM_UNASSIGNED;
2847
    } else {
2848
        pd = p->phys_offset;
2849
    }
2850

    
2851
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2852
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2853
#ifdef TARGET_WORDS_BIGENDIAN
2854
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2855
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2856
#else
2857
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2858
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2859
#endif
2860
    } else {
2861
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2862
            (addr & ~TARGET_PAGE_MASK);
2863
        stq_p(ptr, val);
2864
    }
2865
}
2866

    
2867
/* warning: addr must be aligned */
2868
void stl_phys(target_phys_addr_t addr, uint32_t val)
2869
{
2870
    int io_index;
2871
    uint8_t *ptr;
2872
    unsigned long pd;
2873
    PhysPageDesc *p;
2874

    
2875
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2876
    if (!p) {
2877
        pd = IO_MEM_UNASSIGNED;
2878
    } else {
2879
        pd = p->phys_offset;
2880
    }
2881

    
2882
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2883
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2884
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2885
    } else {
2886
        unsigned long addr1;
2887
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2888
        /* RAM case */
2889
        ptr = phys_ram_base + addr1;
2890
        stl_p(ptr, val);
2891
        if (!cpu_physical_memory_is_dirty(addr1)) {
2892
            /* invalidate code */
2893
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2894
            /* set dirty bit */
2895
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2896
                (0xff & ~CODE_DIRTY_FLAG);
2897
        }
2898
    }
2899
}
2900

    
2901
/* XXX: optimize */
2902
void stb_phys(target_phys_addr_t addr, uint32_t val)
2903
{
2904
    uint8_t v = val;
2905
    cpu_physical_memory_write(addr, &v, 1);
2906
}
2907

    
2908
/* XXX: optimize */
2909
void stw_phys(target_phys_addr_t addr, uint32_t val)
2910
{
2911
    uint16_t v = tswap16(val);
2912
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2913
}
2914

    
2915
/* XXX: optimize */
2916
void stq_phys(target_phys_addr_t addr, uint64_t val)
2917
{
2918
    val = tswap64(val);
2919
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2920
}
2921

    
2922
#endif
2923

    
2924
/* virtual memory access for debug */
2925
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2926
                        uint8_t *buf, int len, int is_write)
2927
{
2928
    int l;
2929
    target_phys_addr_t phys_addr;
2930
    target_ulong page;
2931

    
2932
    while (len > 0) {
2933
        page = addr & TARGET_PAGE_MASK;
2934
        phys_addr = cpu_get_phys_page_debug(env, page);
2935
        /* if no physical page mapped, return an error */
2936
        if (phys_addr == -1)
2937
            return -1;
2938
        l = (page + TARGET_PAGE_SIZE) - addr;
2939
        if (l > len)
2940
            l = len;
2941
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2942
                               buf, l, is_write);
2943
        len -= l;
2944
        buf += l;
2945
        addr += l;
2946
    }
2947
    return 0;
2948
}
2949

    
2950
void dump_exec_info(FILE *f,
2951
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2952
{
2953
    int i, target_code_size, max_target_code_size;
2954
    int direct_jmp_count, direct_jmp2_count, cross_page;
2955
    TranslationBlock *tb;
2956

    
2957
    target_code_size = 0;
2958
    max_target_code_size = 0;
2959
    cross_page = 0;
2960
    direct_jmp_count = 0;
2961
    direct_jmp2_count = 0;
2962
    for(i = 0; i < nb_tbs; i++) {
2963
        tb = &tbs[i];
2964
        target_code_size += tb->size;
2965
        if (tb->size > max_target_code_size)
2966
            max_target_code_size = tb->size;
2967
        if (tb->page_addr[1] != -1)
2968
            cross_page++;
2969
        if (tb->tb_next_offset[0] != 0xffff) {
2970
            direct_jmp_count++;
2971
            if (tb->tb_next_offset[1] != 0xffff) {
2972
                direct_jmp2_count++;
2973
            }
2974
        }
2975
    }
2976
    /* XXX: avoid using doubles ? */
2977
    cpu_fprintf(f, "Translation buffer state:\n");
2978
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2979
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2980
                nb_tbs ? target_code_size / nb_tbs : 0,
2981
                max_target_code_size);
2982
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
2983
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2984
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2985
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2986
            cross_page,
2987
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2988
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
2989
                direct_jmp_count,
2990
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2991
                direct_jmp2_count,
2992
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2993
    cpu_fprintf(f, "\nStatistics:\n");
2994
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
2995
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2996
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
2997
#ifdef CONFIG_PROFILER
2998
    {
2999
        int64_t tot;
3000
        tot = dyngen_interm_time + dyngen_code_time;
3001
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
3002
                    tot, tot / 2.4e9);
3003
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n", 
3004
                    dyngen_tb_count, 
3005
                    dyngen_tb_count1 - dyngen_tb_count,
3006
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
3007
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n", 
3008
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
3009
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n", 
3010
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
3011
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
3012
                    dyngen_tb_count ? 
3013
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
3014
        cpu_fprintf(f, "cycles/op           %0.1f\n", 
3015
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
3016
        cpu_fprintf(f, "cycles/in byte     %0.1f\n", 
3017
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
3018
        cpu_fprintf(f, "cycles/out byte     %0.1f\n", 
3019
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
3020
        if (tot == 0)
3021
            tot = 1;
3022
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n", 
3023
                    (double)dyngen_interm_time / tot * 100.0);
3024
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n", 
3025
                    (double)dyngen_code_time / tot * 100.0);
3026
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
3027
                    dyngen_restore_count);
3028
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
3029
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
3030
        {
3031
            extern void dump_op_count(void);
3032
            dump_op_count();
3033
        }
3034
    }
3035
#endif
3036
}
3037

    
3038
#if !defined(CONFIG_USER_ONLY)
3039

    
3040
#define MMUSUFFIX _cmmu
3041
#define GETPC() NULL
3042
#define env cpu_single_env
3043
#define SOFTMMU_CODE_ACCESS
3044

    
3045
#define SHIFT 0
3046
#include "softmmu_template.h"
3047

    
3048
#define SHIFT 1
3049
#include "softmmu_template.h"
3050

    
3051
#define SHIFT 2
3052
#include "softmmu_template.h"
3053

    
3054
#define SHIFT 3
3055
#include "softmmu_template.h"
3056

    
3057
#undef env
3058

    
3059
#endif