/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

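/* A target page index is thus split in two: the top L1_BITS select an
   l1_map slot, the low L2_BITS select a PageDesc within the
   second-level array.  For instance, with 4 KB target pages
   (TARGET_PAGE_BITS = 12) and a 32-bit address space, L1_BITS =
   32 - 10 - 12 = 10, giving two levels of 1024 entries each. */
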
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

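/* Note: page_init above makes code_gen_buffer executable
   (VirtualProtect/mprotect with execute permission) because the host
   CPU will jump directly into the translated code stored there; a
   plain static array is normally mapped read/write only. */
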
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        cpu_gen_init();
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

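/* In the page and jump lists below, the low two bits of a
   TranslationBlock pointer carry extra state: 0 or 1 selects which of
   the TB's two slots (page_next[n] / jmp_next[n]) the link goes
   through, and 2 marks the head of the circular jmp_first list (see
   tb_link_phys(), which stores (long)tb | 2).  Masking with ~3
   recovers the actual pointer. */
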
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

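/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs 0xf8 into
   tab[0] (bits 3-7) and 0x03 into tab[1] (bits 0-1).
   build_page_bitmap() below uses this to record which bytes of a page
   are covered by translated code. */
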
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

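/* Note: a TB covers at most two target pages, which is why
   tb_gen_code() above computes phys_page2 only when the last byte of
   the block lands on a different page than pc, and why page_addr[]
   and page_next[] have exactly two entries. */
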
/* invalidate all TBs which intersect with the target physical page
   range [start, end). NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called from
   a real cpu write access: the virtual CPU will exit the current TB
   if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

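/* Note: the binary search above relies on tbs[] being ordered by
   increasing tc_ptr, which holds because tb_alloc() hands out blocks
   sequentially from code_gen_buffer.  When there is no exact match,
   &tbs[m_max] is the last TB starting at or before tc_ptr, i.e. the
   one containing it. */
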
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

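/* Note: two cache pages are cleared above because a TB whose pc lies
   on the page preceding 'addr' may spill into the flushed page; the
   jump cache is hashed by pc, so those entries must be discarded as
   well. */
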
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

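/* Note: downgrading addr_write to IO_MEM_NOTDIRTY above sends the
   next store to that page through the slow path, which can set the
   dirty bits again before tlb_set_dirty1() below switches the entry
   back to IO_MEM_RAM. */
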
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1709
        addend -= vaddr;
1710
        te = &env->tlb_table[mmu_idx][index];
1711
        te->addend = addend;
1712
        if (prot & PAGE_READ) {
1713
            te->addr_read = address;
1714
        } else {
1715
            te->addr_read = -1;
1716
        }
1717

    
1718
        if (te->addr_code != -1) {
1719
            tlb_flush_jmp_cache(env, te->addr_code);
1720
        }
1721
        if (prot & PAGE_EXEC) {
1722
            te->addr_code = address;
1723
        } else {
1724
            te->addr_code = -1;
1725
        }
1726
        if (prot & PAGE_WRITE) {
1727
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1728
                (pd & IO_MEM_ROMD)) {
1729
                /* write access calls the I/O callback */
1730
                te->addr_write = vaddr |
1731
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1732
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1733
                       !cpu_physical_memory_is_dirty(pd)) {
1734
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1735
            } else {
1736
                te->addr_write = address;
1737
            }
1738
        } else {
1739
            te->addr_write = -1;
1740
        }
1741
    }
1742
#if !defined(CONFIG_SOFTMMU)
1743
    else {
1744
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1745
            /* IO access: no mapping is done as it will be handled by the
1746
               soft MMU */
1747
            if (!(env->hflags & HF_SOFTMMU_MASK))
1748
                ret = 2;
1749
        } else {
1750
            void *map_addr;
1751

    
1752
            if (vaddr >= MMAP_AREA_END) {
1753
                ret = 2;
1754
            } else {
1755
                if (prot & PROT_WRITE) {
1756
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1757
#if defined(TARGET_HAS_SMC) || 1
1758
                        first_tb ||
1759
#endif
1760
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1761
                         !cpu_physical_memory_is_dirty(pd))) {
1762
                        /* ROM: we do as if code was inside */
1763
                        /* if code is present, we only map as read only and save the
1764
                           original mapping */
1765
                        VirtPageDesc *vp;
1766

    
1767
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1768
                        vp->phys_addr = pd;
1769
                        vp->prot = prot;
1770
                        vp->valid_tag = virt_valid_tag;
1771
                        prot &= ~PAGE_WRITE;
1772
                    }
1773
                }
1774
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1775
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1776
                if (map_addr == MAP_FAILED) {
1777
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1778
                              paddr, vaddr);
1779
                }
1780
            }
1781
        }
1782
    }
1783
#endif
1784
    return ret;
1785
}
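
/* Rough sketch of how the softmmu fast path consumes the entry filled in
   above (the real implementation lives in softmmu_template.h, included at
   the end of this file): a load hits when
       (addr & TARGET_PAGE_MASK) == (te->addr_read & TARGET_PAGE_MASK)
   and the low bits of te->addr_read are zero, i.e. plain RAM; the host
   address is then simply addr + te->addend.  Non-zero low bits (an I/O
   index, IO_MEM_NOTDIRTY, io_mem_watch, ...) force the slow path, which
   dispatches through the io_mem_read/io_mem_write tables. */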

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            /* note: do not return here, so that the remaining pages of
               the range are checked as well */
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
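
/* Worked example for CHECK_SUBPAGE (assuming 4 KB target pages, i.e.
   TARGET_PAGE_SIZE == 0x1000): registering start_addr = 0x1100 with
   orig_size = 0x100 touches only the page at addr = 0x1000.  addr is not
   greater than start_addr, so start_addr2 = 0x1100 & ~TARGET_PAGE_MASK =
   0x100 (> 0), and since 0x1100 + 0x100 - 0x1000 = 0x200 < TARGET_PAGE_SIZE,
   end_addr2 = 0x11ff & ~TARGET_PAGE_MASK = 0x1ff.  Both branches set
   need_subpage, so only bytes 0x100..0x1ff of that page are remapped via
   the subpage machinery below. */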

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
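
/* Usage sketch (illustrative only; the size and address constants are
   assumptions of the example, not of this file): a board model typically
   allocates backing storage and then maps it at a guest physical address:

       ram_addr_t ram_offset = qemu_ram_alloc(0x800000);   // 8 MB of RAM
       cpu_register_physical_memory(0x00000000, 0x800000,
                                    ram_offset | IO_MEM_RAM);

   For MMIO, phys_offset is instead the value returned by
   cpu_register_io_memory() (see below). */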

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
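
/* The notdirty handlers above are the write-protection slow path for RAM:
   the first store to a page whose dirty bits are clear lands here (its TLB
   entry carries IO_MEM_NOTDIRTY), translated code on the page is thrown
   away via tb_invalidate_phys_page_fast(), the dirty bits are set, and
   tlb_set_dirty() then restores the direct RAM store path for the page. */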

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
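
/* Subpage sketch: a fresh subpage_t starts out with every slot routed to
   the page's original handlers (the subpage_register() call above covers
   0 .. TARGET_PAGE_SIZE - 1), and callers then overwrite sub-ranges.  For
   instance, a hypothetical device occupying only the first 16 bytes of a
   page would end up registered as
       subpage_register(mmio, 0x00, 0x0f, dev_io_index);
   (dev_io_index being a value from cpu_register_io_memory(), assumed for
   the example), with the rest of the page still routed to orig_memory. */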

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is positive, the corresponding io zone is modified. If
   it is zero or negative, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
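
/* Usage sketch (the my_* names are placeholders of the example, not part
   of this file):

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };
       int io = cpu_register_io_memory(0, my_read, my_write, my_state);
       cpu_register_physical_memory(0x10000000, 0x1000, io);

   Passing 0 allocates a fresh slot; the returned value already carries the
   index shifted into the low bits expected by the physical memory code. */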

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
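
/* Note on the I/O branch above: misaligned or odd-length buffers are broken
   into 4/2/1-byte device accesses.  For example, an 8-byte write starting
   at device offset 2 is issued as a 16-bit access at +2, a 32-bit access
   at +4 and a 16-bit access at +8. */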

/* used for ROM loading: can write to RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
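
/* Example use (a sketch): a debugger front end such as the gdb stub can
   read guest virtual memory without faulting the guest:

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0) {
           /* page not mapped */
       }

   Translation is done per page via cpu_get_phys_page_debug() above. */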

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op           %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif