root / exec.c @ 00f82b8a

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#if defined(CONFIG_USER_ONLY)
40
#include <qemu.h>
41
#endif
42

    
43
//#define DEBUG_TB_INVALIDATE
44
//#define DEBUG_FLUSH
45
//#define DEBUG_TLB
46
//#define DEBUG_UNASSIGNED
47

    
48
/* make various TB consistency checks */
49
//#define DEBUG_TB_CHECK
50
//#define DEBUG_TLB_CHECK
51

    
52
//#define DEBUG_IOPORT
53
//#define DEBUG_SUBPAGE
54

    
55
#if !defined(CONFIG_USER_ONLY)
56
/* TB consistency checks only implemented for usermode emulation.  */
57
#undef DEBUG_TB_CHECK
58
#endif
59

    
60
/* threshold to flush the translated code buffer */
61
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#define MMAP_AREA_START        0x00000000
66
#define MMAP_AREA_END          0xa8000000
67

    
68
#if defined(TARGET_SPARC64)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 41
70
#elif defined(TARGET_SPARC)
71
#define TARGET_PHYS_ADDR_SPACE_BITS 36
72
#elif defined(TARGET_ALPHA)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#define TARGET_VIRT_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_PPC64)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 42
77
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
78
#define TARGET_PHYS_ADDR_SPACE_BITS 42
79
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
80
#define TARGET_PHYS_ADDR_SPACE_BITS 36
81
#else
82
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
83
#define TARGET_PHYS_ADDR_SPACE_BITS 32
84
#endif
85

    
86
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
87
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88
int nb_tbs;
89
/* any access to the tbs or the page table must use this lock */
90
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91

    
92
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
93
uint8_t *code_gen_ptr;
94

    
95
ram_addr_t phys_ram_size;
96
int phys_ram_fd;
97
uint8_t *phys_ram_base;
98
uint8_t *phys_ram_dirty;
99
static ram_addr_t phys_ram_alloc_offset = 0;
100

    
101
CPUState *first_cpu;
102
/* current CPU in the current thread. It is only valid inside
103
   cpu_exec() */
104
CPUState *cpu_single_env;
105

    
106
typedef struct PageDesc {
107
    /* list of TBs intersecting this ram page */
108
    TranslationBlock *first_tb;
109
    /* in order to optimize self modifying code, we count the number
110
       of lookups we do to a given page to use a bitmap */
111
    unsigned int code_write_count;
112
    uint8_t *code_bitmap;
113
#if defined(CONFIG_USER_ONLY)
114
    unsigned long flags;
115
#endif
116
} PageDesc;
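
/* NOTE: the TranslationBlock pointers stored in 'first_tb' (and in the
   page_next[] links they form) carry a tag in their low two bits telling
   which of the TB's two possible pages the link belongs to; readers mask
   the pointer with ~3 and use the tag as an index (see tb_alloc_page()
   and tb_page_remove() below). */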
117

    
118
typedef struct PhysPageDesc {
119
    /* offset in host memory of the page + io_index in the low 12 bits */
120
    ram_addr_t phys_offset;
121
} PhysPageDesc;
122

    
123
#define L2_BITS 10
124
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
125
/* XXX: this is a temporary hack for alpha target.
126
 *      In the future, this is to be replaced by a multi-level table
127
 *      to actually be able to handle the complete 64 bits address space.
128
 */
129
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
130
#else
131
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
132
#endif
133

    
134
#define L1_SIZE (1 << L1_BITS)
135
#define L2_SIZE (1 << L2_BITS)
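
/* A page index is resolved in two steps: the top L1_BITS select an entry
   of l1_map[] / l1_phys_map[], the low L2_BITS select the descriptor
   inside that chunk, roughly:
       p = l1_map[index >> L2_BITS];
       desc = &p[index & (L2_SIZE - 1)];
   (compare page_find() below; the physical map grows one more level when
   TARGET_PHYS_ADDR_SPACE_BITS > 32). */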
136

    
137
static void io_mem_init(void);
138

    
139
unsigned long qemu_real_host_page_size;
140
unsigned long qemu_host_page_bits;
141
unsigned long qemu_host_page_size;
142
unsigned long qemu_host_page_mask;
143

    
144
/* XXX: for system emulation, it could just be an array */
145
static PageDesc *l1_map[L1_SIZE];
146
PhysPageDesc **l1_phys_map;
147

    
148
/* io memory support */
149
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
150
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
151
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
152
static int io_mem_nb;
153
#if defined(CONFIG_SOFTMMU)
154
static int io_mem_watch;
155
#endif
156

    
157
/* log support */
158
char *logfilename = "/tmp/qemu.log";
159
FILE *logfile;
160
int loglevel;
161
static int log_append = 0;
162

    
163
/* statistics */
164
static int tlb_flush_count;
165
static int tb_flush_count;
166
static int tb_phys_invalidate_count;
167

    
168
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
169
typedef struct subpage_t {
170
    target_phys_addr_t base;
171
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
172
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
173
    void *opaque[TARGET_PAGE_SIZE][2][4];
174
} subpage_t;
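
/* subpage_t (presumably filled in by the sub-page I/O registration code
   later in this file) lets a single target page be split between several
   I/O regions: the read/write handler and opaque pointer are selected by
   the byte offset within the page (SUBPAGE_IDX) and by the access size
   index. */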
175

    
176
static void page_init(void)
177
{
178
    /* NOTE: we can always assume that qemu_host_page_size >=
179
       TARGET_PAGE_SIZE */
180
#ifdef _WIN32
181
    {
182
        SYSTEM_INFO system_info;
183
        DWORD old_protect;
184

    
185
        GetSystemInfo(&system_info);
186
        qemu_real_host_page_size = system_info.dwPageSize;
187

    
188
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
189
                       PAGE_EXECUTE_READWRITE, &old_protect);
190
    }
191
#else
192
    qemu_real_host_page_size = getpagesize();
193
    {
194
        unsigned long start, end;
195

    
196
        start = (unsigned long)code_gen_buffer;
197
        start &= ~(qemu_real_host_page_size - 1);
198

    
199
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
200
        end += qemu_real_host_page_size - 1;
201
        end &= ~(qemu_real_host_page_size - 1);
202

    
203
        mprotect((void *)start, end - start,
204
                 PROT_READ | PROT_WRITE | PROT_EXEC);
205
    }
206
#endif
207

    
208
    if (qemu_host_page_size == 0)
209
        qemu_host_page_size = qemu_real_host_page_size;
210
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
211
        qemu_host_page_size = TARGET_PAGE_SIZE;
212
    qemu_host_page_bits = 0;
213
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
214
        qemu_host_page_bits++;
215
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
216
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
217
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
218

    
219
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
220
    {
221
        long long startaddr, endaddr;
222
        FILE *f;
223
        int n;
224

    
225
        f = fopen("/proc/self/maps", "r");
226
        if (f) {
227
            do {
228
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
229
                if (n == 2) {
230
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
231
                                   TARGET_PAGE_ALIGN(endaddr),
232
                                   PAGE_RESERVED); 
233
                }
234
            } while (!feof(f));
235
            fclose(f);
236
        }
237
    }
238
#endif
239
}
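
/* The /proc/self/maps scan above marks every range already mapped in the
   host process as PAGE_RESERVED, presumably so that later guest mappings
   (user-mode emulation only) are steered away from addresses the host is
   already using. */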
240

    
241
static inline PageDesc *page_find_alloc(target_ulong index)
242
{
243
    PageDesc **lp, *p;
244

    
245
    lp = &l1_map[index >> L2_BITS];
246
    p = *lp;
247
    if (!p) {
248
        /* allocate if not found */
249
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
250
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
251
        *lp = p;
252
    }
253
    return p + (index & (L2_SIZE - 1));
254
}
255

    
256
static inline PageDesc *page_find(target_ulong index)
257
{
258
    PageDesc *p;
259

    
260
    p = l1_map[index >> L2_BITS];
261
    if (!p)
262
        return NULL;
263
    return p + (index & (L2_SIZE - 1));
264
}
265

    
266
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
267
{
268
    void **lp, **p;
269
    PhysPageDesc *pd;
270

    
271
    p = (void **)l1_phys_map;
272
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
273

    
274
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
275
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
276
#endif
277
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
278
    p = *lp;
279
    if (!p) {
280
        /* allocate if not found */
281
        if (!alloc)
282
            return NULL;
283
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
284
        memset(p, 0, sizeof(void *) * L1_SIZE);
285
        *lp = p;
286
    }
287
#endif
288
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
289
    pd = *lp;
290
    if (!pd) {
291
        int i;
292
        /* allocate if not found */
293
        if (!alloc)
294
            return NULL;
295
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
296
        *lp = pd;
297
        for (i = 0; i < L2_SIZE; i++)
298
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
299
    }
300
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
301
}
302

    
303
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
304
{
305
    return phys_page_find_alloc(index, 0);
306
}
307

    
308
#if !defined(CONFIG_USER_ONLY)
309
static void tlb_protect_code(ram_addr_t ram_addr);
310
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
311
                                    target_ulong vaddr);
312
#endif
313

    
314
void cpu_exec_init(CPUState *env)
315
{
316
    CPUState **penv;
317
    int cpu_index;
318

    
319
    if (!code_gen_ptr) {
320
        cpu_gen_init();
321
        code_gen_ptr = code_gen_buffer;
322
        page_init();
323
        io_mem_init();
324
    }
325
    env->next_cpu = NULL;
326
    penv = &first_cpu;
327
    cpu_index = 0;
328
    while (*penv != NULL) {
329
        penv = (CPUState **)&(*penv)->next_cpu;
330
        cpu_index++;
331
    }
332
    env->cpu_index = cpu_index;
333
    env->nb_watchpoints = 0;
334
    *penv = env;
335
}
336

    
337
static inline void invalidate_page_bitmap(PageDesc *p)
338
{
339
    if (p->code_bitmap) {
340
        qemu_free(p->code_bitmap);
341
        p->code_bitmap = NULL;
342
    }
343
    p->code_write_count = 0;
344
}
345

    
346
/* set to NULL all the 'first_tb' fields in all PageDescs */
347
static void page_flush_tb(void)
348
{
349
    int i, j;
350
    PageDesc *p;
351

    
352
    for(i = 0; i < L1_SIZE; i++) {
353
        p = l1_map[i];
354
        if (p) {
355
            for(j = 0; j < L2_SIZE; j++) {
356
                p->first_tb = NULL;
357
                invalidate_page_bitmap(p);
358
                p++;
359
            }
360
        }
361
    }
362
}
363

    
364
/* flush all the translation blocks */
365
/* XXX: tb_flush is currently not thread safe */
366
void tb_flush(CPUState *env1)
367
{
368
    CPUState *env;
369
#if defined(DEBUG_FLUSH)
370
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
371
           (unsigned long)(code_gen_ptr - code_gen_buffer),
372
           nb_tbs, nb_tbs > 0 ?
373
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
374
#endif
375
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
376
        cpu_abort(env1, "Internal error: code buffer overflow\n");
377

    
378
    nb_tbs = 0;
379

    
380
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
381
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
382
    }
383

    
384
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
385
    page_flush_tb();
386

    
387
    code_gen_ptr = code_gen_buffer;
388
    /* XXX: flush processor icache at this point if cache flush is
389
       expensive */
390
    tb_flush_count++;
391
}
392

    
393
#ifdef DEBUG_TB_CHECK
394

    
395
static void tb_invalidate_check(target_ulong address)
396
{
397
    TranslationBlock *tb;
398
    int i;
399
    address &= TARGET_PAGE_MASK;
400
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
401
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
402
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
403
                  address >= tb->pc + tb->size)) {
404
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
405
                       address, (long)tb->pc, tb->size);
406
            }
407
        }
408
    }
409
}
410

    
411
/* verify that all the pages have correct rights for code */
412
static void tb_page_check(void)
413
{
414
    TranslationBlock *tb;
415
    int i, flags1, flags2;
416

    
417
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
418
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
419
            flags1 = page_get_flags(tb->pc);
420
            flags2 = page_get_flags(tb->pc + tb->size - 1);
421
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
422
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
423
                       (long)tb->pc, tb->size, flags1, flags2);
424
            }
425
        }
426
    }
427
}
428

    
429
void tb_jmp_check(TranslationBlock *tb)
430
{
431
    TranslationBlock *tb1;
432
    unsigned int n1;
433

    
434
    /* suppress any remaining jumps to this TB */
435
    tb1 = tb->jmp_first;
436
    for(;;) {
437
        n1 = (long)tb1 & 3;
438
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
439
        if (n1 == 2)
440
            break;
441
        tb1 = tb1->jmp_next[n1];
442
    }
443
    /* check end of list */
444
    if (tb1 != tb) {
445
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
446
    }
447
}
448

    
449
#endif
450

    
451
/* invalidate one TB */
452
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
453
                             int next_offset)
454
{
455
    TranslationBlock *tb1;
456
    for(;;) {
457
        tb1 = *ptb;
458
        if (tb1 == tb) {
459
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
460
            break;
461
        }
462
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
463
    }
464
}
465

    
466
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
467
{
468
    TranslationBlock *tb1;
469
    unsigned int n1;
470

    
471
    for(;;) {
472
        tb1 = *ptb;
473
        n1 = (long)tb1 & 3;
474
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
475
        if (tb1 == tb) {
476
            *ptb = tb1->page_next[n1];
477
            break;
478
        }
479
        ptb = &tb1->page_next[n1];
480
    }
481
}
482

    
483
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
484
{
485
    TranslationBlock *tb1, **ptb;
486
    unsigned int n1;
487

    
488
    ptb = &tb->jmp_next[n];
489
    tb1 = *ptb;
490
    if (tb1) {
491
        /* find tb(n) in circular list */
492
        for(;;) {
493
            tb1 = *ptb;
494
            n1 = (long)tb1 & 3;
495
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
496
            if (n1 == n && tb1 == tb)
497
                break;
498
            if (n1 == 2) {
499
                ptb = &tb1->jmp_first;
500
            } else {
501
                ptb = &tb1->jmp_next[n1];
502
            }
503
        }
504
        /* now we can suppress tb(n) from the list */
505
        *ptb = tb->jmp_next[n];
506

    
507
        tb->jmp_next[n] = NULL;
508
    }
509
}
510

    
511
/* reset the jump entry 'n' of a TB so that it is not chained to
512
   another TB */
513
static inline void tb_reset_jump(TranslationBlock *tb, int n)
514
{
515
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
516
}
517

    
518
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
519
{
520
    CPUState *env;
521
    PageDesc *p;
522
    unsigned int h, n1;
523
    target_phys_addr_t phys_pc;
524
    TranslationBlock *tb1, *tb2;
525

    
526
    /* remove the TB from the hash list */
527
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
528
    h = tb_phys_hash_func(phys_pc);
529
    tb_remove(&tb_phys_hash[h], tb,
530
              offsetof(TranslationBlock, phys_hash_next));
531

    
532
    /* remove the TB from the page list */
533
    if (tb->page_addr[0] != page_addr) {
534
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
535
        tb_page_remove(&p->first_tb, tb);
536
        invalidate_page_bitmap(p);
537
    }
538
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
539
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
540
        tb_page_remove(&p->first_tb, tb);
541
        invalidate_page_bitmap(p);
542
    }
543

    
544
    tb_invalidated_flag = 1;
545

    
546
    /* remove the TB from the hash list */
547
    h = tb_jmp_cache_hash_func(tb->pc);
548
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
549
        if (env->tb_jmp_cache[h] == tb)
550
            env->tb_jmp_cache[h] = NULL;
551
    }
552

    
553
    /* suppress this TB from the two jump lists */
554
    tb_jmp_remove(tb, 0);
555
    tb_jmp_remove(tb, 1);
556

    
557
    /* suppress any remaining jumps to this TB */
558
    tb1 = tb->jmp_first;
559
    for(;;) {
560
        n1 = (long)tb1 & 3;
561
        if (n1 == 2)
562
            break;
563
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
564
        tb2 = tb1->jmp_next[n1];
565
        tb_reset_jump(tb1, n1);
566
        tb1->jmp_next[n1] = NULL;
567
        tb1 = tb2;
568
    }
569
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
570

    
571
    tb_phys_invalidate_count++;
572
}
573

    
574
static inline void set_bits(uint8_t *tab, int start, int len)
575
{
576
    int end, mask, end1;
577

    
578
    end = start + len;
579
    tab += start >> 3;
580
    mask = 0xff << (start & 7);
581
    if ((start & ~7) == (end & ~7)) {
582
        if (start < end) {
583
            mask &= ~(0xff << (end & 7));
584
            *tab |= mask;
585
        }
586
    } else {
587
        *tab++ |= mask;
588
        start = (start + 8) & ~7;
589
        end1 = end & ~7;
590
        while (start < end1) {
591
            *tab++ = 0xff;
592
            start += 8;
593
        }
594
        if (start < end) {
595
            mask = ~(0xff << (end & 7));
596
            *tab |= mask;
597
        }
598
    }
599
}
600

    
601
static void build_page_bitmap(PageDesc *p)
602
{
603
    int n, tb_start, tb_end;
604
    TranslationBlock *tb;
605

    
606
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
607
    if (!p->code_bitmap)
608
        return;
609
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
610

    
611
    tb = p->first_tb;
612
    while (tb != NULL) {
613
        n = (long)tb & 3;
614
        tb = (TranslationBlock *)((long)tb & ~3);
615
        /* NOTE: this is subtle as a TB may span two physical pages */
616
        if (n == 0) {
617
            /* NOTE: tb_end may be after the end of the page, but
618
               it is not a problem */
619
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
620
            tb_end = tb_start + tb->size;
621
            if (tb_end > TARGET_PAGE_SIZE)
622
                tb_end = TARGET_PAGE_SIZE;
623
        } else {
624
            tb_start = 0;
625
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
626
        }
627
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
628
        tb = tb->page_next[n];
629
    }
630
}
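
/* The bitmap built above has one bit per byte of the page, set where
   translated code was read from; tb_invalidate_phys_page_fast() below
   consults it so that writes which do not overlap any translated code
   can avoid the full tb_invalidate_phys_page_range() pass. */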
631

    
632
#ifdef TARGET_HAS_PRECISE_SMC
633

    
634
static void tb_gen_code(CPUState *env,
635
                        target_ulong pc, target_ulong cs_base, int flags,
636
                        int cflags)
637
{
638
    TranslationBlock *tb;
639
    uint8_t *tc_ptr;
640
    target_ulong phys_pc, phys_page2, virt_page2;
641
    int code_gen_size;
642

    
643
    phys_pc = get_phys_addr_code(env, pc);
644
    tb = tb_alloc(pc);
645
    if (!tb) {
646
        /* flush must be done */
647
        tb_flush(env);
648
        /* cannot fail at this point */
649
        tb = tb_alloc(pc);
650
    }
651
    tc_ptr = code_gen_ptr;
652
    tb->tc_ptr = tc_ptr;
653
    tb->cs_base = cs_base;
654
    tb->flags = flags;
655
    tb->cflags = cflags;
656
    cpu_gen_code(env, tb, &code_gen_size);
657
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
658

    
659
    /* check next page if needed */
660
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
661
    phys_page2 = -1;
662
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
663
        phys_page2 = get_phys_addr_code(env, virt_page2);
664
    }
665
    tb_link_phys(tb, phys_pc, phys_page2);
666
}
667
#endif
668

    
669
/* invalidate all TBs which intersect with the target physical page
670
   starting in range [start, end). NOTE: start and end must refer to
671
   the same physical page. 'is_cpu_write_access' should be true if called
672
   from a real cpu write access: the virtual CPU will exit the current
673
   TB if code is modified inside this TB. */
674
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
675
                                   int is_cpu_write_access)
676
{
677
    int n, current_tb_modified, current_tb_not_found, current_flags;
678
    CPUState *env = cpu_single_env;
679
    PageDesc *p;
680
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
681
    target_ulong tb_start, tb_end;
682
    target_ulong current_pc, current_cs_base;
683

    
684
    p = page_find(start >> TARGET_PAGE_BITS);
685
    if (!p)
686
        return;
687
    if (!p->code_bitmap &&
688
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
689
        is_cpu_write_access) {
690
        /* build code bitmap */
691
        build_page_bitmap(p);
692
    }
693

    
694
    /* we remove all the TBs in the range [start, end) */
695
    /* XXX: see if in some cases it could be faster to invalidate all the code */
696
    current_tb_not_found = is_cpu_write_access;
697
    current_tb_modified = 0;
698
    current_tb = NULL; /* avoid warning */
699
    current_pc = 0; /* avoid warning */
700
    current_cs_base = 0; /* avoid warning */
701
    current_flags = 0; /* avoid warning */
702
    tb = p->first_tb;
703
    while (tb != NULL) {
704
        n = (long)tb & 3;
705
        tb = (TranslationBlock *)((long)tb & ~3);
706
        tb_next = tb->page_next[n];
707
        /* NOTE: this is subtle as a TB may span two physical pages */
708
        if (n == 0) {
709
            /* NOTE: tb_end may be after the end of the page, but
710
               it is not a problem */
711
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
712
            tb_end = tb_start + tb->size;
713
        } else {
714
            tb_start = tb->page_addr[1];
715
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
716
        }
717
        if (!(tb_end <= start || tb_start >= end)) {
718
#ifdef TARGET_HAS_PRECISE_SMC
719
            if (current_tb_not_found) {
720
                current_tb_not_found = 0;
721
                current_tb = NULL;
722
                if (env->mem_write_pc) {
723
                    /* now we have a real cpu fault */
724
                    current_tb = tb_find_pc(env->mem_write_pc);
725
                }
726
            }
727
            if (current_tb == tb &&
728
                !(current_tb->cflags & CF_SINGLE_INSN)) {
729
                /* If we are modifying the current TB, we must stop
730
                its execution. We could be more precise by checking
731
                that the modification is after the current PC, but it
732
                would require a specialized function to partially
733
                restore the CPU state */
734

    
735
                current_tb_modified = 1;
736
                cpu_restore_state(current_tb, env,
737
                                  env->mem_write_pc, NULL);
738
#if defined(TARGET_I386)
739
                current_flags = env->hflags;
740
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
741
                current_cs_base = (target_ulong)env->segs[R_CS].base;
742
                current_pc = current_cs_base + env->eip;
743
#else
744
#error unsupported CPU
745
#endif
746
            }
747
#endif /* TARGET_HAS_PRECISE_SMC */
748
            /* we need to do that to handle the case where a signal
749
               occurs while doing tb_phys_invalidate() */
750
            saved_tb = NULL;
751
            if (env) {
752
                saved_tb = env->current_tb;
753
                env->current_tb = NULL;
754
            }
755
            tb_phys_invalidate(tb, -1);
756
            if (env) {
757
                env->current_tb = saved_tb;
758
                if (env->interrupt_request && env->current_tb)
759
                    cpu_interrupt(env, env->interrupt_request);
760
            }
761
        }
762
        tb = tb_next;
763
    }
764
#if !defined(CONFIG_USER_ONLY)
765
    /* if no code remaining, no need to continue to use slow writes */
766
    if (!p->first_tb) {
767
        invalidate_page_bitmap(p);
768
        if (is_cpu_write_access) {
769
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
770
        }
771
    }
772
#endif
773
#ifdef TARGET_HAS_PRECISE_SMC
774
    if (current_tb_modified) {
775
        /* we generate a block containing just the instruction
776
           modifying the memory. It will ensure that it cannot modify
777
           itself */
778
        env->current_tb = NULL;
779
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
780
                    CF_SINGLE_INSN);
781
        cpu_resume_from_signal(env, NULL);
782
    }
783
#endif
784
}
785

    
786
/* len must be <= 8 and start must be a multiple of len */
787
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
788
{
789
    PageDesc *p;
790
    int offset, b;
791
#if 0
792
    if (1) {
793
        if (loglevel) {
794
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
795
                   cpu_single_env->mem_write_vaddr, len,
796
                   cpu_single_env->eip,
797
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
798
        }
799
    }
800
#endif
801
    p = page_find(start >> TARGET_PAGE_BITS);
802
    if (!p)
803
        return;
804
    if (p->code_bitmap) {
805
        offset = start & ~TARGET_PAGE_MASK;
806
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
807
        if (b & ((1 << len) - 1))
808
            goto do_invalidate;
809
    } else {
810
    do_invalidate:
811
        tb_invalidate_phys_page_range(start, start + len, 1);
812
    }
813
}
814

    
815
#if !defined(CONFIG_SOFTMMU)
816
static void tb_invalidate_phys_page(target_phys_addr_t addr,
817
                                    unsigned long pc, void *puc)
818
{
819
    int n, current_flags, current_tb_modified;
820
    target_ulong current_pc, current_cs_base;
821
    PageDesc *p;
822
    TranslationBlock *tb, *current_tb;
823
#ifdef TARGET_HAS_PRECISE_SMC
824
    CPUState *env = cpu_single_env;
825
#endif
826

    
827
    addr &= TARGET_PAGE_MASK;
828
    p = page_find(addr >> TARGET_PAGE_BITS);
829
    if (!p)
830
        return;
831
    tb = p->first_tb;
832
    current_tb_modified = 0;
833
    current_tb = NULL;
834
    current_pc = 0; /* avoid warning */
835
    current_cs_base = 0; /* avoid warning */
836
    current_flags = 0; /* avoid warning */
837
#ifdef TARGET_HAS_PRECISE_SMC
838
    if (tb && pc != 0) {
839
        current_tb = tb_find_pc(pc);
840
    }
841
#endif
842
    while (tb != NULL) {
843
        n = (long)tb & 3;
844
        tb = (TranslationBlock *)((long)tb & ~3);
845
#ifdef TARGET_HAS_PRECISE_SMC
846
        if (current_tb == tb &&
847
            !(current_tb->cflags & CF_SINGLE_INSN)) {
848
                /* If we are modifying the current TB, we must stop
849
                   its execution. We could be more precise by checking
850
                   that the modification is after the current PC, but it
851
                   would require a specialized function to partially
852
                   restore the CPU state */
853

    
854
            current_tb_modified = 1;
855
            cpu_restore_state(current_tb, env, pc, puc);
856
#if defined(TARGET_I386)
857
            current_flags = env->hflags;
858
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
859
            current_cs_base = (target_ulong)env->segs[R_CS].base;
860
            current_pc = current_cs_base + env->eip;
861
#else
862
#error unsupported CPU
863
#endif
864
        }
865
#endif /* TARGET_HAS_PRECISE_SMC */
866
        tb_phys_invalidate(tb, addr);
867
        tb = tb->page_next[n];
868
    }
869
    p->first_tb = NULL;
870
#ifdef TARGET_HAS_PRECISE_SMC
871
    if (current_tb_modified) {
872
        /* we generate a block containing just the instruction
873
           modifying the memory. It will ensure that it cannot modify
874
           itself */
875
        env->current_tb = NULL;
876
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
877
                    CF_SINGLE_INSN);
878
        cpu_resume_from_signal(env, puc);
879
    }
880
#endif
881
}
882
#endif
883

    
884
/* add the tb in the target page and protect it if necessary */
885
static inline void tb_alloc_page(TranslationBlock *tb,
886
                                 unsigned int n, target_ulong page_addr)
887
{
888
    PageDesc *p;
889
    TranslationBlock *last_first_tb;
890

    
891
    tb->page_addr[n] = page_addr;
892
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
893
    tb->page_next[n] = p->first_tb;
894
    last_first_tb = p->first_tb;
895
    p->first_tb = (TranslationBlock *)((long)tb | n);
896
    invalidate_page_bitmap(p);
897

    
898
#if defined(TARGET_HAS_SMC) || 1
899

    
900
#if defined(CONFIG_USER_ONLY)
901
    if (p->flags & PAGE_WRITE) {
902
        target_ulong addr;
903
        PageDesc *p2;
904
        int prot;
905

    
906
        /* force the host page as non writable (writes will have a
907
           page fault + mprotect overhead) */
908
        page_addr &= qemu_host_page_mask;
909
        prot = 0;
910
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
911
            addr += TARGET_PAGE_SIZE) {
912

    
913
            p2 = page_find (addr >> TARGET_PAGE_BITS);
914
            if (!p2)
915
                continue;
916
            prot |= p2->flags;
917
            p2->flags &= ~PAGE_WRITE;
918
            page_get_flags(addr);
919
          }
920
        mprotect(g2h(page_addr), qemu_host_page_size,
921
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
922
#ifdef DEBUG_TB_INVALIDATE
923
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
924
               page_addr);
925
#endif
926
    }
927
#else
928
    /* if some code is already present, then the pages are already
929
       protected. So we handle the case where only the first TB is
930
       allocated in a physical page */
931
    if (!last_first_tb) {
932
        tlb_protect_code(page_addr);
933
    }
934
#endif
935

    
936
#endif /* TARGET_HAS_SMC */
937
}
938

    
939
/* Allocate a new translation block. Flush the translation buffer if
940
   too many translation blocks or too much generated code. */
941
TranslationBlock *tb_alloc(target_ulong pc)
942
{
943
    TranslationBlock *tb;
944

    
945
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
946
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
947
        return NULL;
948
    tb = &tbs[nb_tbs++];
949
    tb->pc = pc;
950
    tb->cflags = 0;
951
    return tb;
952
}
953

    
954
/* add a new TB and link it to the physical page tables. phys_page2 is
955
   (-1) to indicate that only one page contains the TB. */
956
void tb_link_phys(TranslationBlock *tb,
957
                  target_ulong phys_pc, target_ulong phys_page2)
958
{
959
    unsigned int h;
960
    TranslationBlock **ptb;
961

    
962
    /* add in the physical hash table */
963
    h = tb_phys_hash_func(phys_pc);
964
    ptb = &tb_phys_hash[h];
965
    tb->phys_hash_next = *ptb;
966
    *ptb = tb;
967

    
968
    /* add in the page list */
969
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
970
    if (phys_page2 != -1)
971
        tb_alloc_page(tb, 1, phys_page2);
972
    else
973
        tb->page_addr[1] = -1;
974

    
975
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
976
    tb->jmp_next[0] = NULL;
977
    tb->jmp_next[1] = NULL;
978

    
979
    /* init original jump addresses */
980
    if (tb->tb_next_offset[0] != 0xffff)
981
        tb_reset_jump(tb, 0);
982
    if (tb->tb_next_offset[1] != 0xffff)
983
        tb_reset_jump(tb, 1);
984

    
985
#ifdef DEBUG_TB_CHECK
986
    tb_page_check();
987
#endif
988
}
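
/* Typical life cycle (see tb_gen_code() above): a TB is reserved with
   tb_alloc(), translated with cpu_gen_code(), then published with
   tb_link_phys(); from then on a write to either of its physical pages
   invalidates it through tb_invalidate_phys_page_range(). */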
989

    
990
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
991
   tb[1].tc_ptr. Return NULL if not found */
992
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
993
{
994
    int m_min, m_max, m;
995
    unsigned long v;
996
    TranslationBlock *tb;
997

    
998
    if (nb_tbs <= 0)
999
        return NULL;
1000
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1001
        tc_ptr >= (unsigned long)code_gen_ptr)
1002
        return NULL;
1003
    /* binary search (cf Knuth) */
1004
    m_min = 0;
1005
    m_max = nb_tbs - 1;
1006
    while (m_min <= m_max) {
1007
        m = (m_min + m_max) >> 1;
1008
        tb = &tbs[m];
1009
        v = (unsigned long)tb->tc_ptr;
1010
        if (v == tc_ptr)
1011
            return tb;
1012
        else if (tc_ptr < v) {
1013
            m_max = m - 1;
1014
        } else {
1015
            m_min = m + 1;
1016
        }
1017
    }
1018
    return &tbs[m_max];
1019
}
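
/* NOTE: the binary search above works because tbs[] entries are created
   in generation order and code_gen_ptr only moves forward between
   flushes, so tc_ptr is monotonically increasing across the array. */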
1020

    
1021
static void tb_reset_jump_recursive(TranslationBlock *tb);
1022

    
1023
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1024
{
1025
    TranslationBlock *tb1, *tb_next, **ptb;
1026
    unsigned int n1;
1027

    
1028
    tb1 = tb->jmp_next[n];
1029
    if (tb1 != NULL) {
1030
        /* find head of list */
1031
        for(;;) {
1032
            n1 = (long)tb1 & 3;
1033
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1034
            if (n1 == 2)
1035
                break;
1036
            tb1 = tb1->jmp_next[n1];
1037
        }
1038
        /* we are now sure that tb jumps to tb1 */
1039
        tb_next = tb1;
1040

    
1041
        /* remove tb from the jmp_first list */
1042
        ptb = &tb_next->jmp_first;
1043
        for(;;) {
1044
            tb1 = *ptb;
1045
            n1 = (long)tb1 & 3;
1046
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1047
            if (n1 == n && tb1 == tb)
1048
                break;
1049
            ptb = &tb1->jmp_next[n1];
1050
        }
1051
        *ptb = tb->jmp_next[n];
1052
        tb->jmp_next[n] = NULL;
1053

    
1054
        /* suppress the jump to next tb in generated code */
1055
        tb_reset_jump(tb, n);
1056

    
1057
        /* suppress jumps in the tb on which we could have jumped */
1058
        tb_reset_jump_recursive(tb_next);
1059
    }
1060
}
1061

    
1062
static void tb_reset_jump_recursive(TranslationBlock *tb)
1063
{
1064
    tb_reset_jump_recursive2(tb, 0);
1065
    tb_reset_jump_recursive2(tb, 1);
1066
}
1067

    
1068
#if defined(TARGET_HAS_ICE)
1069
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1070
{
1071
    target_phys_addr_t addr;
1072
    target_ulong pd;
1073
    ram_addr_t ram_addr;
1074
    PhysPageDesc *p;
1075

    
1076
    addr = cpu_get_phys_page_debug(env, pc);
1077
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1078
    if (!p) {
1079
        pd = IO_MEM_UNASSIGNED;
1080
    } else {
1081
        pd = p->phys_offset;
1082
    }
1083
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1084
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1085
}
1086
#endif
1087

    
1088
/* Add a watchpoint.  */
1089
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1090
{
1091
    int i;
1092

    
1093
    for (i = 0; i < env->nb_watchpoints; i++) {
1094
        if (addr == env->watchpoint[i].vaddr)
1095
            return 0;
1096
    }
1097
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1098
        return -1;
1099

    
1100
    i = env->nb_watchpoints++;
1101
    env->watchpoint[i].vaddr = addr;
1102
    tlb_flush_page(env, addr);
1103
    /* FIXME: This flush is needed because of the hack to make memory ops
1104
       terminate the TB.  It can be removed once the proper IO trap and
1105
       re-execute bits are in.  */
1106
    tb_flush(env);
1107
    return i;
1108
}
1109

    
1110
/* Remove a watchpoint.  */
1111
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1112
{
1113
    int i;
1114

    
1115
    for (i = 0; i < env->nb_watchpoints; i++) {
1116
        if (addr == env->watchpoint[i].vaddr) {
1117
            env->nb_watchpoints--;
1118
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1119
            tlb_flush_page(env, addr);
1120
            return 0;
1121
        }
1122
    }
1123
    return -1;
1124
}
1125

    
1126
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1127
   breakpoint is reached */
1128
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1129
{
1130
#if defined(TARGET_HAS_ICE)
1131
    int i;
1132

    
1133
    for(i = 0; i < env->nb_breakpoints; i++) {
1134
        if (env->breakpoints[i] == pc)
1135
            return 0;
1136
    }
1137

    
1138
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1139
        return -1;
1140
    env->breakpoints[env->nb_breakpoints++] = pc;
1141

    
1142
    breakpoint_invalidate(env, pc);
1143
    return 0;
1144
#else
1145
    return -1;
1146
#endif
1147
}
1148

    
1149
/* remove a breakpoint */
1150
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1151
{
1152
#if defined(TARGET_HAS_ICE)
1153
    int i;
1154
    for(i = 0; i < env->nb_breakpoints; i++) {
1155
        if (env->breakpoints[i] == pc)
1156
            goto found;
1157
    }
1158
    return -1;
1159
 found:
1160
    env->nb_breakpoints--;
1161
    if (i < env->nb_breakpoints)
1162
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1163

    
1164
    breakpoint_invalidate(env, pc);
1165
    return 0;
1166
#else
1167
    return -1;
1168
#endif
1169
}
1170

    
1171
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1172
   CPU loop after each instruction */
1173
void cpu_single_step(CPUState *env, int enabled)
1174
{
1175
#if defined(TARGET_HAS_ICE)
1176
    if (env->singlestep_enabled != enabled) {
1177
        env->singlestep_enabled = enabled;
1178
        /* must flush all the translated code to avoid inconsistencies */
1179
        /* XXX: only flush what is necessary */
1180
        tb_flush(env);
1181
    }
1182
#endif
1183
}
1184

    
1185
/* enable or disable low levels log */
1186
void cpu_set_log(int log_flags)
1187
{
1188
    loglevel = log_flags;
1189
    if (loglevel && !logfile) {
1190
        logfile = fopen(logfilename, log_append ? "a" : "w");
1191
        if (!logfile) {
1192
            perror(logfilename);
1193
            _exit(1);
1194
        }
1195
#if !defined(CONFIG_SOFTMMU)
1196
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1197
        {
1198
            static uint8_t logfile_buf[4096];
1199
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1200
        }
1201
#else
1202
        setvbuf(logfile, NULL, _IOLBF, 0);
1203
#endif
1204
        log_append = 1;
1205
    }
1206
    if (!loglevel && logfile) {
1207
        fclose(logfile);
1208
        logfile = NULL;
1209
    }
1210
}
1211

    
1212
void cpu_set_log_filename(const char *filename)
1213
{
1214
    logfilename = strdup(filename);
1215
    if (logfile) {
1216
        fclose(logfile);
1217
        logfile = NULL;
1218
    }
1219
    cpu_set_log(loglevel);
1220
}
1221

    
1222
/* mask must never be zero, except for A20 change call */
1223
void cpu_interrupt(CPUState *env, int mask)
1224
{
1225
    TranslationBlock *tb;
1226
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1227

    
1228
    env->interrupt_request |= mask;
1229
    /* if the cpu is currently executing code, we must unlink it and
1230
       all the potentially executing TBs */
1231
    tb = env->current_tb;
1232
    if (tb && !testandset(&interrupt_lock)) {
1233
        env->current_tb = NULL;
1234
        tb_reset_jump_recursive(tb);
1235
        resetlock(&interrupt_lock);
1236
    }
1237
}
1238

    
1239
void cpu_reset_interrupt(CPUState *env, int mask)
1240
{
1241
    env->interrupt_request &= ~mask;
1242
}
1243

    
1244
CPULogItem cpu_log_items[] = {
1245
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1246
      "show generated host assembly code for each compiled TB" },
1247
    { CPU_LOG_TB_IN_ASM, "in_asm",
1248
      "show target assembly code for each compiled TB" },
1249
    { CPU_LOG_TB_OP, "op",
1250
      "show micro ops for each compiled TB" },
1251
    { CPU_LOG_TB_OP_OPT, "op_opt",
1252
      "show micro ops "
1253
#ifdef TARGET_I386
1254
      "before eflags optimization and "
1255
#endif
1256
      "after liveness analysis" },
1257
    { CPU_LOG_INT, "int",
1258
      "show interrupts/exceptions in short format" },
1259
    { CPU_LOG_EXEC, "exec",
1260
      "show trace before each executed TB (lots of logs)" },
1261
    { CPU_LOG_TB_CPU, "cpu",
1262
      "show CPU state before block translation" },
1263
#ifdef TARGET_I386
1264
    { CPU_LOG_PCALL, "pcall",
1265
      "show protected mode far calls/returns/exceptions" },
1266
#endif
1267
#ifdef DEBUG_IOPORT
1268
    { CPU_LOG_IOPORT, "ioport",
1269
      "show all i/o ports accesses" },
1270
#endif
1271
    { 0, NULL, NULL },
1272
};
1273

    
1274
static int cmp1(const char *s1, int n, const char *s2)
1275
{
1276
    if (strlen(s2) != n)
1277
        return 0;
1278
    return memcmp(s1, s2, n) == 0;
1279
}
1280

    
1281
/* takes a comma separated list of log masks. Return 0 if error. */
1282
int cpu_str_to_log_mask(const char *str)
1283
{
1284
    CPULogItem *item;
1285
    int mask;
1286
    const char *p, *p1;
1287

    
1288
    p = str;
1289
    mask = 0;
1290
    for(;;) {
1291
        p1 = strchr(p, ',');
1292
        if (!p1)
1293
            p1 = p + strlen(p);
1294
        if(cmp1(p,p1-p,"all")) {
1295
                for(item = cpu_log_items; item->mask != 0; item++) {
1296
                        mask |= item->mask;
1297
                }
1298
        } else {
1299
        for(item = cpu_log_items; item->mask != 0; item++) {
1300
            if (cmp1(p, p1 - p, item->name))
1301
                goto found;
1302
        }
1303
        return 0;
1304
        }
1305
    found:
1306
        mask |= item->mask;
1307
        if (*p1 != ',')
1308
            break;
1309
        p = p1 + 1;
1310
    }
1311
    return mask;
1312
}
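
/* e.g. cpu_str_to_log_mask("in_asm,op") yields
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" selects every mask in
   cpu_log_items[], and an unknown name makes the whole call return 0. */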
1313

    
1314
void cpu_abort(CPUState *env, const char *fmt, ...)
1315
{
1316
    va_list ap;
1317
    va_list ap2;
1318

    
1319
    va_start(ap, fmt);
1320
    va_copy(ap2, ap);
1321
    fprintf(stderr, "qemu: fatal: ");
1322
    vfprintf(stderr, fmt, ap);
1323
    fprintf(stderr, "\n");
1324
#ifdef TARGET_I386
1325
    if(env->intercept & INTERCEPT_SVM_MASK) {
1326
        /* most probably the virtual machine should not
1327
           be shut down but rather caught by the VMM */
1328
        vmexit(SVM_EXIT_SHUTDOWN, 0);
1329
    }
1330
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1331
#else
1332
    cpu_dump_state(env, stderr, fprintf, 0);
1333
#endif
1334
    if (logfile) {
1335
        fprintf(logfile, "qemu: fatal: ");
1336
        vfprintf(logfile, fmt, ap2);
1337
        fprintf(logfile, "\n");
1338
#ifdef TARGET_I386
1339
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1340
#else
1341
        cpu_dump_state(env, logfile, fprintf, 0);
1342
#endif
1343
        fflush(logfile);
1344
        fclose(logfile);
1345
    }
1346
    va_end(ap2);
1347
    va_end(ap);
1348
    abort();
1349
}
1350

    
1351
CPUState *cpu_copy(CPUState *env)
1352
{
1353
    CPUState *new_env = cpu_init(env->cpu_model_str);
1354
    /* preserve chaining and index */
1355
    CPUState *next_cpu = new_env->next_cpu;
1356
    int cpu_index = new_env->cpu_index;
1357
    memcpy(new_env, env, sizeof(CPUState));
1358
    new_env->next_cpu = next_cpu;
1359
    new_env->cpu_index = cpu_index;
1360
    return new_env;
1361
}
1362

    
1363
#if !defined(CONFIG_USER_ONLY)
1364

    
1365
/* NOTE: if flush_global is true, also flush global entries (not
1366
   implemented yet) */
1367
void tlb_flush(CPUState *env, int flush_global)
1368
{
1369
    int i;
1370

    
1371
#if defined(DEBUG_TLB)
1372
    printf("tlb_flush:\n");
1373
#endif
1374
    /* must reset current TB so that interrupts cannot modify the
1375
       links while we are modifying them */
1376
    env->current_tb = NULL;
1377

    
1378
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1379
        env->tlb_table[0][i].addr_read = -1;
1380
        env->tlb_table[0][i].addr_write = -1;
1381
        env->tlb_table[0][i].addr_code = -1;
1382
        env->tlb_table[1][i].addr_read = -1;
1383
        env->tlb_table[1][i].addr_write = -1;
1384
        env->tlb_table[1][i].addr_code = -1;
1385
#if (NB_MMU_MODES >= 3)
1386
        env->tlb_table[2][i].addr_read = -1;
1387
        env->tlb_table[2][i].addr_write = -1;
1388
        env->tlb_table[2][i].addr_code = -1;
1389
#if (NB_MMU_MODES == 4)
1390
        env->tlb_table[3][i].addr_read = -1;
1391
        env->tlb_table[3][i].addr_write = -1;
1392
        env->tlb_table[3][i].addr_code = -1;
1393
#endif
1394
#endif
1395
    }
1396

    
1397
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1398

    
1399
#if !defined(CONFIG_SOFTMMU)
1400
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1401
#endif
1402
#ifdef USE_KQEMU
1403
    if (env->kqemu_enabled) {
1404
        kqemu_flush(env, flush_global);
1405
    }
1406
#endif
1407
    tlb_flush_count++;
1408
}
1409

    
1410
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1411
{
1412
    if (addr == (tlb_entry->addr_read &
1413
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1414
        addr == (tlb_entry->addr_write &
1415
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1416
        addr == (tlb_entry->addr_code &
1417
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1418
        tlb_entry->addr_read = -1;
1419
        tlb_entry->addr_write = -1;
1420
        tlb_entry->addr_code = -1;
1421
    }
1422
}
1423

    
1424
void tlb_flush_page(CPUState *env, target_ulong addr)
1425
{
1426
    int i;
1427
    TranslationBlock *tb;
1428

    
1429
#if defined(DEBUG_TLB)
1430
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1431
#endif
1432
    /* must reset current TB so that interrupts cannot modify the
1433
       links while we are modifying them */
1434
    env->current_tb = NULL;
1435

    
1436
    addr &= TARGET_PAGE_MASK;
1437
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1438
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1439
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1440
#if (NB_MMU_MODES >= 3)
1441
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1442
#if (NB_MMU_MODES == 4)
1443
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1444
#endif
1445
#endif
1446

    
1447
    /* Discard jump cache entries for any tb which might potentially
1448
       overlap the flushed page.  */
1449
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1450
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1451

    
1452
    i = tb_jmp_cache_hash_page(addr);
1453
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1454

    
1455
#if !defined(CONFIG_SOFTMMU)
1456
    if (addr < MMAP_AREA_END)
1457
        munmap((void *)addr, TARGET_PAGE_SIZE);
1458
#endif
1459
#ifdef USE_KQEMU
1460
    if (env->kqemu_enabled) {
1461
        kqemu_flush_page(env, addr);
1462
    }
1463
#endif
1464
}
1465

    
1466
/* update the TLBs so that writes to code in the virtual page 'addr'
1467
   can be detected */
1468
static void tlb_protect_code(ram_addr_t ram_addr)
1469
{
1470
    cpu_physical_memory_reset_dirty(ram_addr,
1471
                                    ram_addr + TARGET_PAGE_SIZE,
1472
                                    CODE_DIRTY_FLAG);
1473
}
1474

    
1475
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1476
   tested for self modifying code */
1477
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1478
                                    target_ulong vaddr)
1479
{
1480
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1481
}
1482

    
1483
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1484
                                         unsigned long start, unsigned long length)
1485
{
1486
    unsigned long addr;
1487
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1488
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1489
        if ((addr - start) < length) {
1490
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1491
        }
1492
    }
1493
}
1494

    
1495
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1496
                                     int dirty_flags)
1497
{
1498
    CPUState *env;
1499
    unsigned long length, start1;
1500
    int i, mask, len;
1501
    uint8_t *p;
1502

    
1503
    start &= TARGET_PAGE_MASK;
1504
    end = TARGET_PAGE_ALIGN(end);
1505

    
1506
    length = end - start;
1507
    if (length == 0)
1508
        return;
1509
    len = length >> TARGET_PAGE_BITS;
1510
#ifdef USE_KQEMU
1511
    /* XXX: should not depend on cpu context */
1512
    env = first_cpu;
1513
    if (env->kqemu_enabled) {
1514
        ram_addr_t addr;
1515
        addr = start;
1516
        for(i = 0; i < len; i++) {
1517
            kqemu_set_notdirty(env, addr);
1518
            addr += TARGET_PAGE_SIZE;
1519
        }
1520
    }
1521
#endif
1522
    mask = ~dirty_flags;
1523
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1524
    for(i = 0; i < len; i++)
1525
        p[i] &= mask;
1526

    
1527
    /* we modify the TLB cache so that the dirty bit will be set again
1528
       when accessing the range */
1529
    start1 = start + (unsigned long)phys_ram_base;
1530
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1531
        for(i = 0; i < CPU_TLB_SIZE; i++)
1532
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1533
        for(i = 0; i < CPU_TLB_SIZE; i++)
1534
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1535
#if (NB_MMU_MODES >= 3)
1536
        for(i = 0; i < CPU_TLB_SIZE; i++)
1537
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1538
#if (NB_MMU_MODES == 4)
1539
        for(i = 0; i < CPU_TLB_SIZE; i++)
1540
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1541
#endif
1542
#endif
1543
    }
1544

    
1545
#if !defined(CONFIG_SOFTMMU)
1546
    /* XXX: this is expensive */
1547
    {
1548
        VirtPageDesc *p;
1549
        int j;
1550
        target_ulong addr;
1551

    
1552
        for(i = 0; i < L1_SIZE; i++) {
1553
            p = l1_virt_map[i];
1554
            if (p) {
1555
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1556
                for(j = 0; j < L2_SIZE; j++) {
1557
                    if (p->valid_tag == virt_valid_tag &&
1558
                        p->phys_addr >= start && p->phys_addr < end &&
1559
                        (p->prot & PROT_WRITE)) {
1560
                        if (addr < MMAP_AREA_END) {
1561
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
1562
                                     p->prot & ~PROT_WRITE);
1563
                        }
1564
                    }
1565
                    addr += TARGET_PAGE_SIZE;
1566
                    p++;
1567
                }
1568
            }
1569
        }
1570
    }
1571
#endif
1572
}
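
/* Dirty tracking summary: phys_ram_dirty holds one flag byte per target
   page.  Clearing flags here also downgrades the matching RAM entries of
   every CPU TLB to IO_MEM_NOTDIRTY, so the next write takes the slow
   path and can set the dirty bits again (see tlb_update_dirty() and
   tlb_set_dirty() below). */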
1573

    
1574
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1575
{
1576
    ram_addr_t ram_addr;
1577

    
1578
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1579
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1580
            tlb_entry->addend - (unsigned long)phys_ram_base;
1581
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1582
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1583
        }
1584
    }
1585
}
1586

    
1587
/* update the TLB according to the current state of the dirty bits */
1588
void cpu_tlb_update_dirty(CPUState *env)
1589
{
1590
    int i;
1591
    for(i = 0; i < CPU_TLB_SIZE; i++)
1592
        tlb_update_dirty(&env->tlb_table[0][i]);
1593
    for(i = 0; i < CPU_TLB_SIZE; i++)
1594
        tlb_update_dirty(&env->tlb_table[1][i]);
1595
#if (NB_MMU_MODES >= 3)
1596
    for(i = 0; i < CPU_TLB_SIZE; i++)
1597
        tlb_update_dirty(&env->tlb_table[2][i]);
1598
#if (NB_MMU_MODES == 4)
1599
    for(i = 0; i < CPU_TLB_SIZE; i++)
1600
        tlb_update_dirty(&env->tlb_table[3][i]);
1601
#endif
1602
#endif
1603
}
1604

    
1605
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1606
                                  unsigned long start)
1607
{
1608
    unsigned long addr;
1609
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1610
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1611
        if (addr == start) {
1612
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1613
        }
1614
    }
1615
}
1616

    
1617
/* update the TLB corresponding to virtual page vaddr and phys addr
1618
   addr so that it is no longer dirty */
1619
static inline void tlb_set_dirty(CPUState *env,
1620
                                 unsigned long addr, target_ulong vaddr)
1621
{
1622
    int i;
1623

    
1624
    addr &= TARGET_PAGE_MASK;
1625
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1626
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1627
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1628
#if (NB_MMU_MODES >= 3)
1629
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1630
#if (NB_MMU_MODES == 4)
1631
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1632
#endif
1633
#endif
1634
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
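
/* CHECK_SUBPAGE computes the first and last byte offsets (start_addr2,
   end_addr2) covered by the registered region inside the current target
   page, and sets need_subpage when the region does not span the whole
   page, in which case a subpage dispatcher must be installed for it. */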

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
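
/* Illustrative use from board init code (the addresses and sizes are made
   up for the example):
       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
*/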

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %ld)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
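
/* RAM pages that still contain translated code are entered in the TLB with
   the IO_MEM_NOTDIRTY handlers above: the first write invalidates the TBs
   on the page, marks it dirty, and, once the page is fully dirty, switches
   the TLB entry back to the fast RAM path via tlb_set_dirty(). */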

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
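
/* The watch_mem_* handlers above only intercept the access so that
   check_watchpoint() can raise CPU_INTERRUPT_DEBUG; the data itself is
   still transferred through the normal *_phys helpers. */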

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
2413

    
2414
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2415
                             ram_addr_t memory)
2416
{
2417
    int idx, eidx;
2418
    unsigned int i;
2419

    
2420
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2421
        return -1;
2422
    idx = SUBPAGE_IDX(start);
2423
    eidx = SUBPAGE_IDX(end);
2424
#if defined(DEBUG_SUBPAGE)
2425
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2426
           mmio, start, end, idx, eidx, memory);
2427
#endif
2428
    memory >>= IO_MEM_SHIFT;
2429
    for (; idx <= eidx; idx++) {
2430
        for (i = 0; i < 4; i++) {
2431
            if (io_mem_read[memory][i]) {
2432
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2433
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2434
            }
2435
            if (io_mem_write[memory][i]) {
2436
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2437
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2438
            }
2439
        }
2440
    }
2441

    
2442
    return 0;
2443
}
2444

    
2445
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2446
                           ram_addr_t orig_memory)
2447
{
2448
    subpage_t *mmio;
2449
    int subpage_memory;
2450

    
2451
    mmio = qemu_mallocz(sizeof(subpage_t));
2452
    if (mmio != NULL) {
2453
        mmio->base = base;
2454
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2455
#if defined(DEBUG_SUBPAGE)
2456
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2457
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2458
#endif
2459
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2460
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2461
    }
2462

    
2463
    return mmio;
2464
}
2465

    
2466
static void io_mem_init(void)
2467
{
2468
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2469
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2470
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2471
    io_mem_nb = 5;
2472

    
2473
#if defined(CONFIG_SOFTMMU)
2474
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2475
                                          watch_mem_write, NULL);
2476
#endif
2477
    /* alloc dirty bits array */
2478
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2479
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2480
}
2481

    
2482
/* mem_read and mem_write are arrays of functions containing the
2483
   function to access byte (index 0), word (index 1) and dword (index
2484
   2). Functions can be omitted with a NULL function pointer. The
2485
   registered functions may be modified dynamically later.
2486
   If io_index is non zero, the corresponding io zone is
2487
   modified. If it is zero, a new io zone is allocated. The return
2488
   value can be used with cpu_register_physical_memory(). (-1) is
2489
   returned if error. */
2490
int cpu_register_io_memory(int io_index,
2491
                           CPUReadMemoryFunc **mem_read,
2492
                           CPUWriteMemoryFunc **mem_write,
2493
                           void *opaque)
2494
{
2495
    int i, subwidth = 0;
2496

    
2497
    if (io_index <= 0) {
2498
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2499
            return -1;
2500
        io_index = io_mem_nb++;
2501
    } else {
2502
        if (io_index >= IO_MEM_NB_ENTRIES)
2503
            return -1;
2504
    }
2505

    
2506
    for(i = 0;i < 3; i++) {
2507
        if (!mem_read[i] || !mem_write[i])
2508
            subwidth = IO_MEM_SUBWIDTH;
2509
        io_mem_read[io_index][i] = mem_read[i];
2510
        io_mem_write[io_index][i] = mem_write[i];
2511
    }
2512
    io_mem_opaque[io_index] = opaque;
2513
    return (io_index << IO_MEM_SHIFT) | subwidth;
2514
}
2515

    
2516
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2517
{
2518
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2519
}
2520

    
2521
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2522
{
2523
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2524
}
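
/* Illustrative use from a device model (all names here are made up for the
   example):
       static CPUReadMemoryFunc *my_read[3] = { my_readb, my_readw, my_readl };
       static CPUWriteMemoryFunc *my_write[3] = { my_writeb, my_writew, my_writel };
       int io = cpu_register_io_memory(0, my_read, my_write, s);
       cpu_register_physical_memory(base, 0x1000, io);
*/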

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
2659

    
2660
/* used for ROM loading : can write in RAM and ROM */
2661
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2662
                                   const uint8_t *buf, int len)
2663
{
2664
    int l;
2665
    uint8_t *ptr;
2666
    target_phys_addr_t page;
2667
    unsigned long pd;
2668
    PhysPageDesc *p;
2669

    
2670
    while (len > 0) {
2671
        page = addr & TARGET_PAGE_MASK;
2672
        l = (page + TARGET_PAGE_SIZE) - addr;
2673
        if (l > len)
2674
            l = len;
2675
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2676
        if (!p) {
2677
            pd = IO_MEM_UNASSIGNED;
2678
        } else {
2679
            pd = p->phys_offset;
2680
        }
2681

    
2682
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2683
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2684
            !(pd & IO_MEM_ROMD)) {
2685
            /* do nothing */
2686
        } else {
2687
            unsigned long addr1;
2688
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2689
            /* ROM/RAM case */
2690
            ptr = phys_ram_base + addr1;
2691
            memcpy(ptr, buf, l);
2692
        }
2693
        len -= l;
2694
        buf += l;
2695
        addr += l;
2696
    }
2697
}
2698

    
2699

    
2700
/* warning: addr must be aligned */
2701
uint32_t ldl_phys(target_phys_addr_t addr)
2702
{
2703
    int io_index;
2704
    uint8_t *ptr;
2705
    uint32_t val;
2706
    unsigned long pd;
2707
    PhysPageDesc *p;
2708

    
2709
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2710
    if (!p) {
2711
        pd = IO_MEM_UNASSIGNED;
2712
    } else {
2713
        pd = p->phys_offset;
2714
    }
2715

    
2716
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2717
        !(pd & IO_MEM_ROMD)) {
2718
        /* I/O case */
2719
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2720
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2721
    } else {
2722
        /* RAM case */
2723
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2724
            (addr & ~TARGET_PAGE_MASK);
2725
        val = ldl_p(ptr);
2726
    }
2727
    return val;
2728
}
2729

    
2730
/* warning: addr must be aligned */
2731
uint64_t ldq_phys(target_phys_addr_t addr)
2732
{
2733
    int io_index;
2734
    uint8_t *ptr;
2735
    uint64_t val;
2736
    unsigned long pd;
2737
    PhysPageDesc *p;
2738

    
2739
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2740
    if (!p) {
2741
        pd = IO_MEM_UNASSIGNED;
2742
    } else {
2743
        pd = p->phys_offset;
2744
    }
2745

    
2746
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2747
        !(pd & IO_MEM_ROMD)) {
2748
        /* I/O case */
2749
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2750
#ifdef TARGET_WORDS_BIGENDIAN
2751
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2752
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2753
#else
2754
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2755
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2756
#endif
2757
    } else {
2758
        /* RAM case */
2759
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2760
            (addr & ~TARGET_PAGE_MASK);
2761
        val = ldq_p(ptr);
2762
    }
2763
    return val;
2764
}
2765

    
2766
/* XXX: optimize */
2767
uint32_t ldub_phys(target_phys_addr_t addr)
2768
{
2769
    uint8_t val;
2770
    cpu_physical_memory_read(addr, &val, 1);
2771
    return val;
2772
}
2773

    
2774
/* XXX: optimize */
2775
uint32_t lduw_phys(target_phys_addr_t addr)
2776
{
2777
    uint16_t val;
2778
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2779
    return tswap16(val);
2780
}
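
/* ldub_phys and lduw_phys above (and stb_phys, stw_phys and stq_phys below)
   simply go through cpu_physical_memory_rw, while ldl_phys, ldq_phys,
   stl_phys and the *_notdirty variants access RAM or the 32/64-bit MMIO
   callbacks directly. */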

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op           %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte     %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}
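
/* The softmmu_template.h inclusions below (SHIFT 0..3 with MMUSUFFIX _cmmu
   and SOFTMMU_CODE_ACCESS) are expected to generate the slow-path code
   fetch accessors used when translating guest code; see the template for
   the authoritative details. */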

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif