exec.c @ 57fec1fe


1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#if defined(CONFIG_USER_ONLY)
39
#include <qemu.h>
40
#endif
41

    
42
//#define DEBUG_TB_INVALIDATE
43
//#define DEBUG_FLUSH
44
//#define DEBUG_TLB
45
//#define DEBUG_UNASSIGNED
46

    
47
/* make various TB consistency checks */
48
//#define DEBUG_TB_CHECK
49
//#define DEBUG_TLB_CHECK
50

    
51
//#define DEBUG_IOPORT
52
//#define DEBUG_SUBPAGE
53

    
54
#if !defined(CONFIG_USER_ONLY)
55
/* TB consistency checks only implemented for usermode emulation.  */
56
#undef DEBUG_TB_CHECK
57
#endif
58

    
59
/* threshold to flush the translated code buffer */
60
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#define MMAP_AREA_START        0x00000000
65
#define MMAP_AREA_END          0xa8000000
66

    
67
#if defined(TARGET_SPARC64)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 41
69
#elif defined(TARGET_SPARC)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 36
71
#elif defined(TARGET_ALPHA)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#define TARGET_VIRT_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_PPC64)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#else
77
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
78
#define TARGET_PHYS_ADDR_SPACE_BITS 32
79
#endif
80

    
81
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
82
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
83
int nb_tbs;
84
/* any access to the tbs or the page table must use this lock */
85
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
86

    
87
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
88
uint8_t *code_gen_ptr;
89

    
90
int phys_ram_size;
91
int phys_ram_fd;
92
uint8_t *phys_ram_base;
93
uint8_t *phys_ram_dirty;
94
static ram_addr_t phys_ram_alloc_offset = 0;
95

    
96
CPUState *first_cpu;
97
/* current CPU in the current thread. It is only valid inside
98
   cpu_exec() */
99
CPUState *cpu_single_env;
100

    
101
typedef struct PageDesc {
102
    /* list of TBs intersecting this ram page */
103
    TranslationBlock *first_tb;
104
    /* in order to optimize self-modifying code handling, we count the
105
       write accesses to a given page and use a bitmap above a threshold */
106
    unsigned int code_write_count;
107
    uint8_t *code_bitmap;
108
#if defined(CONFIG_USER_ONLY)
109
    unsigned long flags;
110
#endif
111
} PageDesc;
112

    
113
typedef struct PhysPageDesc {
114
    /* offset in host memory of the page + io_index in the low 12 bits */
115
    uint32_t phys_offset;
116
} PhysPageDesc;
117

    
118
#define L2_BITS 10
119
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
120
/* XXX: this is a temporary hack for alpha target.
121
 *      In the future, this is to be replaced by a multi-level table
122
 *      to actually be able to handle the complete 64 bits address space.
123
 */
124
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
125
#else
126
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
127
#endif
128

    
129
#define L1_SIZE (1 << L1_BITS)
130
#define L2_SIZE (1 << L2_BITS)
131

    
132
static void io_mem_init(void);
133

    
134
unsigned long qemu_real_host_page_size;
135
unsigned long qemu_host_page_bits;
136
unsigned long qemu_host_page_size;
137
unsigned long qemu_host_page_mask;
138

    
139
/* XXX: for system emulation, it could just be an array */
140
static PageDesc *l1_map[L1_SIZE];
141
PhysPageDesc **l1_phys_map;
142

    
143
/* io memory support */
144
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
145
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
146
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
147
static int io_mem_nb;
148
#if defined(CONFIG_SOFTMMU)
149
static int io_mem_watch;
150
#endif
151

    
152
/* log support */
153
char *logfilename = "/tmp/qemu.log";
154
FILE *logfile;
155
int loglevel;
156
static int log_append = 0;
157

    
158
/* statistics */
159
static int tlb_flush_count;
160
static int tb_flush_count;
161
static int tb_phys_invalidate_count;
162

    
163
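/* subpage support: a subpage_t splits a single target page among several
   I/O handlers, keeping per-offset tables of read/write handlers and
   opaque pointers */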
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
164
typedef struct subpage_t {
165
    target_phys_addr_t base;
166
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
167
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
168
    void *opaque[TARGET_PAGE_SIZE][2][4];
169
} subpage_t;
170

    
171
static void page_init(void)
172
{
173
    /* NOTE: we can always assume that qemu_host_page_size >=
174
       TARGET_PAGE_SIZE */
175
#ifdef _WIN32
176
    {
177
        SYSTEM_INFO system_info;
178
        DWORD old_protect;
179

    
180
        GetSystemInfo(&system_info);
181
        qemu_real_host_page_size = system_info.dwPageSize;
182

    
183
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
184
                       PAGE_EXECUTE_READWRITE, &old_protect);
185
    }
186
#else
187
    qemu_real_host_page_size = getpagesize();
188
    {
189
        unsigned long start, end;
190

    
191
        start = (unsigned long)code_gen_buffer;
192
        start &= ~(qemu_real_host_page_size - 1);
193

    
194
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
195
        end += qemu_real_host_page_size - 1;
196
        end &= ~(qemu_real_host_page_size - 1);
197

    
198
        mprotect((void *)start, end - start,
199
                 PROT_READ | PROT_WRITE | PROT_EXEC);
200
    }
201
#endif
202

    
203
    if (qemu_host_page_size == 0)
204
        qemu_host_page_size = qemu_real_host_page_size;
205
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
206
        qemu_host_page_size = TARGET_PAGE_SIZE;
207
    qemu_host_page_bits = 0;
208
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
209
        qemu_host_page_bits++;
210
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
211
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
212
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
213

    
214
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
215
    {
216
        long long startaddr, endaddr;
217
        FILE *f;
218
        int n;
219

    
220
        f = fopen("/proc/self/maps", "r");
221
        if (f) {
222
            do {
223
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
224
                if (n == 2) {
225
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
226
                                   TARGET_PAGE_ALIGN(endaddr),
227
                                   PAGE_RESERVED); 
228
                }
229
            } while (!feof(f));
230
            fclose(f);
231
        }
232
    }
233
#endif
234
}
235

    
236
static inline PageDesc *page_find_alloc(unsigned int index)
237
{
238
    PageDesc **lp, *p;
239

    
240
    lp = &l1_map[index >> L2_BITS];
241
    p = *lp;
242
    if (!p) {
243
        /* allocate if not found */
244
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
245
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
246
        *lp = p;
247
    }
248
    return p + (index & (L2_SIZE - 1));
249
}
250

    
251
static inline PageDesc *page_find(unsigned int index)
252
{
253
    PageDesc *p;
254

    
255
    p = l1_map[index >> L2_BITS];
256
    if (!p)
257
        return NULL;
258
    return p + (index & (L2_SIZE - 1));
259
}
260

    
261
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
262
{
263
    void **lp, **p;
264
    PhysPageDesc *pd;
265

    
266
    p = (void **)l1_phys_map;
267
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
268

    
269
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
270
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
271
#endif
272
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
273
    p = *lp;
274
    if (!p) {
275
        /* allocate if not found */
276
        if (!alloc)
277
            return NULL;
278
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
279
        memset(p, 0, sizeof(void *) * L1_SIZE);
280
        *lp = p;
281
    }
282
#endif
283
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
284
    pd = *lp;
285
    if (!pd) {
286
        int i;
287
        /* allocate if not found */
288
        if (!alloc)
289
            return NULL;
290
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
291
        *lp = pd;
292
        for (i = 0; i < L2_SIZE; i++)
293
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
294
    }
295
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
296
}
297

    
298
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
299
{
300
    return phys_page_find_alloc(index, 0);
301
}
302

    
303
#if !defined(CONFIG_USER_ONLY)
304
static void tlb_protect_code(ram_addr_t ram_addr);
305
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
306
                                    target_ulong vaddr);
307
#endif
308

    
309
void cpu_exec_init(CPUState *env)
310
{
311
    CPUState **penv;
312
    int cpu_index;
313

    
314
    if (!code_gen_ptr) {
315
        cpu_gen_init();
316
        code_gen_ptr = code_gen_buffer;
317
        page_init();
318
        io_mem_init();
319
    }
320
    env->next_cpu = NULL;
321
    penv = &first_cpu;
322
    cpu_index = 0;
323
    while (*penv != NULL) {
324
        penv = (CPUState **)&(*penv)->next_cpu;
325
        cpu_index++;
326
    }
327
    env->cpu_index = cpu_index;
328
    env->nb_watchpoints = 0;
329
    *penv = env;
330
}
331

    
332
static inline void invalidate_page_bitmap(PageDesc *p)
333
{
334
    if (p->code_bitmap) {
335
        qemu_free(p->code_bitmap);
336
        p->code_bitmap = NULL;
337
    }
338
    p->code_write_count = 0;
339
}
340

    
341
/* set to NULL all the 'first_tb' fields in all PageDescs */
342
static void page_flush_tb(void)
343
{
344
    int i, j;
345
    PageDesc *p;
346

    
347
    for(i = 0; i < L1_SIZE; i++) {
348
        p = l1_map[i];
349
        if (p) {
350
            for(j = 0; j < L2_SIZE; j++) {
351
                p->first_tb = NULL;
352
                invalidate_page_bitmap(p);
353
                p++;
354
            }
355
        }
356
    }
357
}
358

    
359
/* flush all the translation blocks */
360
/* XXX: tb_flush is currently not thread safe */
361
void tb_flush(CPUState *env1)
362
{
363
    CPUState *env;
364
#if defined(DEBUG_FLUSH)
365
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
366
           (unsigned long)(code_gen_ptr - code_gen_buffer),
367
           nb_tbs, nb_tbs > 0 ?
368
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
369
#endif
370
    nb_tbs = 0;
371

    
372
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
373
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
374
    }
375

    
376
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
377
    page_flush_tb();
378

    
379
    code_gen_ptr = code_gen_buffer;
380
    /* XXX: flush processor icache at this point if cache flush is
381
       expensive */
382
    tb_flush_count++;
383
}
384

    
385
#ifdef DEBUG_TB_CHECK
386

    
387
static void tb_invalidate_check(target_ulong address)
388
{
389
    TranslationBlock *tb;
390
    int i;
391
    address &= TARGET_PAGE_MASK;
392
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
393
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
394
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
395
                  address >= tb->pc + tb->size)) {
396
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
397
                       address, (long)tb->pc, tb->size);
398
            }
399
        }
400
    }
401
}
402

    
403
/* verify that all the pages have correct rights for code */
404
static void tb_page_check(void)
405
{
406
    TranslationBlock *tb;
407
    int i, flags1, flags2;
408

    
409
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
410
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
411
            flags1 = page_get_flags(tb->pc);
412
            flags2 = page_get_flags(tb->pc + tb->size - 1);
413
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
414
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
415
                       (long)tb->pc, tb->size, flags1, flags2);
416
            }
417
        }
418
    }
419
}
420

    
421
void tb_jmp_check(TranslationBlock *tb)
422
{
423
    TranslationBlock *tb1;
424
    unsigned int n1;
425

    
426
    /* suppress any remaining jumps to this TB */
427
    tb1 = tb->jmp_first;
428
    for(;;) {
429
        n1 = (long)tb1 & 3;
430
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
431
        if (n1 == 2)
432
            break;
433
        tb1 = tb1->jmp_next[n1];
434
    }
435
    /* check end of list */
436
    if (tb1 != tb) {
437
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
438
    }
439
}
440

    
441
#endif
442

    
443
/* invalidate one TB */
444
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
445
                             int next_offset)
446
{
447
    TranslationBlock *tb1;
448
    for(;;) {
449
        tb1 = *ptb;
450
        if (tb1 == tb) {
451
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
452
            break;
453
        }
454
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
455
    }
456
}
457

    
458
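/* remove 'tb' from a per-page TB list; the low bits of each list pointer
   select which page_next[] slot links the next element */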
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
459
{
460
    TranslationBlock *tb1;
461
    unsigned int n1;
462

    
463
    for(;;) {
464
        tb1 = *ptb;
465
        n1 = (long)tb1 & 3;
466
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
467
        if (tb1 == tb) {
468
            *ptb = tb1->page_next[n1];
469
            break;
470
        }
471
        ptb = &tb1->page_next[n1];
472
    }
473
}
474

    
475
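/* unlink jump slot 'n' of 'tb' from the circular list of TBs jumping to the
   same target; the list end is marked by a pointer tagged with 2 in its low
   bits */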
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
476
{
477
    TranslationBlock *tb1, **ptb;
478
    unsigned int n1;
479

    
480
    ptb = &tb->jmp_next[n];
481
    tb1 = *ptb;
482
    if (tb1) {
483
        /* find tb(n) in circular list */
484
        for(;;) {
485
            tb1 = *ptb;
486
            n1 = (long)tb1 & 3;
487
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
488
            if (n1 == n && tb1 == tb)
489
                break;
490
            if (n1 == 2) {
491
                ptb = &tb1->jmp_first;
492
            } else {
493
                ptb = &tb1->jmp_next[n1];
494
            }
495
        }
496
        /* now we can suppress tb(n) from the list */
497
        *ptb = tb->jmp_next[n];
498

    
499
        tb->jmp_next[n] = NULL;
500
    }
501
}
502

    
503
/* reset the jump entry 'n' of a TB so that it is not chained to
504
   another TB */
505
static inline void tb_reset_jump(TranslationBlock *tb, int n)
506
{
507
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
508
}
509

    
510
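/* remove a TB from the physical hash table, the page lists, the CPU jump
   caches and the jump chains so that it can no longer be executed */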
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
511
{
512
    CPUState *env;
513
    PageDesc *p;
514
    unsigned int h, n1;
515
    target_ulong phys_pc;
516
    TranslationBlock *tb1, *tb2;
517

    
518
    /* remove the TB from the hash list */
519
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
520
    h = tb_phys_hash_func(phys_pc);
521
    tb_remove(&tb_phys_hash[h], tb,
522
              offsetof(TranslationBlock, phys_hash_next));
523

    
524
    /* remove the TB from the page list */
525
    if (tb->page_addr[0] != page_addr) {
526
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
527
        tb_page_remove(&p->first_tb, tb);
528
        invalidate_page_bitmap(p);
529
    }
530
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
531
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
532
        tb_page_remove(&p->first_tb, tb);
533
        invalidate_page_bitmap(p);
534
    }
535

    
536
    tb_invalidated_flag = 1;
537

    
538
    /* remove the TB from the hash list */
539
    h = tb_jmp_cache_hash_func(tb->pc);
540
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
541
        if (env->tb_jmp_cache[h] == tb)
542
            env->tb_jmp_cache[h] = NULL;
543
    }
544

    
545
    /* suppress this TB from the two jump lists */
546
    tb_jmp_remove(tb, 0);
547
    tb_jmp_remove(tb, 1);
548

    
549
    /* suppress any remaining jumps to this TB */
550
    tb1 = tb->jmp_first;
551
    for(;;) {
552
        n1 = (long)tb1 & 3;
553
        if (n1 == 2)
554
            break;
555
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
556
        tb2 = tb1->jmp_next[n1];
557
        tb_reset_jump(tb1, n1);
558
        tb1->jmp_next[n1] = NULL;
559
        tb1 = tb2;
560
    }
561
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
562

    
563
    tb_phys_invalidate_count++;
564
}
565

    
566
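/* set 'len' consecutive bits starting at bit index 'start' in the bitmap 'tab' */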
static inline void set_bits(uint8_t *tab, int start, int len)
567
{
568
    int end, mask, end1;
569

    
570
    end = start + len;
571
    tab += start >> 3;
572
    mask = 0xff << (start & 7);
573
    if ((start & ~7) == (end & ~7)) {
574
        if (start < end) {
575
            mask &= ~(0xff << (end & 7));
576
            *tab |= mask;
577
        }
578
    } else {
579
        *tab++ |= mask;
580
        start = (start + 8) & ~7;
581
        end1 = end & ~7;
582
        while (start < end1) {
583
            *tab++ = 0xff;
584
            start += 8;
585
        }
586
        if (start < end) {
587
            mask = ~(0xff << (end & 7));
588
            *tab |= mask;
589
        }
590
    }
591
}
592

    
593
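/* build a bitmap of the bytes of the page that are covered by translated
   code, by walking the page's TB list */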
static void build_page_bitmap(PageDesc *p)
594
{
595
    int n, tb_start, tb_end;
596
    TranslationBlock *tb;
597

    
598
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
599
    if (!p->code_bitmap)
600
        return;
601
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
602

    
603
    tb = p->first_tb;
604
    while (tb != NULL) {
605
        n = (long)tb & 3;
606
        tb = (TranslationBlock *)((long)tb & ~3);
607
        /* NOTE: this is subtle as a TB may span two physical pages */
608
        if (n == 0) {
609
            /* NOTE: tb_end may be after the end of the page, but
610
               it is not a problem */
611
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
612
            tb_end = tb_start + tb->size;
613
            if (tb_end > TARGET_PAGE_SIZE)
614
                tb_end = TARGET_PAGE_SIZE;
615
        } else {
616
            tb_start = 0;
617
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
618
        }
619
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
620
        tb = tb->page_next[n];
621
    }
622
}
623

    
624
#ifdef TARGET_HAS_PRECISE_SMC
625

    
626
static void tb_gen_code(CPUState *env,
627
                        target_ulong pc, target_ulong cs_base, int flags,
628
                        int cflags)
629
{
630
    TranslationBlock *tb;
631
    uint8_t *tc_ptr;
632
    target_ulong phys_pc, phys_page2, virt_page2;
633
    int code_gen_size;
634

    
635
    phys_pc = get_phys_addr_code(env, pc);
636
    tb = tb_alloc(pc);
637
    if (!tb) {
638
        /* flush must be done */
639
        tb_flush(env);
640
        /* cannot fail at this point */
641
        tb = tb_alloc(pc);
642
    }
643
    tc_ptr = code_gen_ptr;
644
    tb->tc_ptr = tc_ptr;
645
    tb->cs_base = cs_base;
646
    tb->flags = flags;
647
    tb->cflags = cflags;
648
    cpu_gen_code(env, tb, &code_gen_size);
649
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
650

    
651
    /* check next page if needed */
652
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
653
    phys_page2 = -1;
654
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
655
        phys_page2 = get_phys_addr_code(env, virt_page2);
656
    }
657
    tb_link_phys(tb, phys_pc, phys_page2);
658
}
659
#endif
660

    
661
/* invalidate all TBs which intersect with the target physical address
662
   range [start, end). NOTE: start and end must refer to
663
   the same physical page. 'is_cpu_write_access' should be true if called
664
   from a real cpu write access: the virtual CPU will exit the current
665
   TB if code is modified inside this TB. */
666
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
667
                                   int is_cpu_write_access)
668
{
669
    int n, current_tb_modified, current_tb_not_found, current_flags;
670
    CPUState *env = cpu_single_env;
671
    PageDesc *p;
672
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
673
    target_ulong tb_start, tb_end;
674
    target_ulong current_pc, current_cs_base;
675

    
676
    p = page_find(start >> TARGET_PAGE_BITS);
677
    if (!p)
678
        return;
679
    if (!p->code_bitmap &&
680
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
681
        is_cpu_write_access) {
682
        /* build code bitmap */
683
        build_page_bitmap(p);
684
    }
685

    
686
    /* we remove all the TBs in the range [start, end) */
687
    /* XXX: see if in some cases it could be faster to invalidate all the code */
688
    current_tb_not_found = is_cpu_write_access;
689
    current_tb_modified = 0;
690
    current_tb = NULL; /* avoid warning */
691
    current_pc = 0; /* avoid warning */
692
    current_cs_base = 0; /* avoid warning */
693
    current_flags = 0; /* avoid warning */
694
    tb = p->first_tb;
695
    while (tb != NULL) {
696
        n = (long)tb & 3;
697
        tb = (TranslationBlock *)((long)tb & ~3);
698
        tb_next = tb->page_next[n];
699
        /* NOTE: this is subtle as a TB may span two physical pages */
700
        if (n == 0) {
701
            /* NOTE: tb_end may be after the end of the page, but
702
               it is not a problem */
703
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
704
            tb_end = tb_start + tb->size;
705
        } else {
706
            tb_start = tb->page_addr[1];
707
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
708
        }
709
        if (!(tb_end <= start || tb_start >= end)) {
710
#ifdef TARGET_HAS_PRECISE_SMC
711
            if (current_tb_not_found) {
712
                current_tb_not_found = 0;
713
                current_tb = NULL;
714
                if (env->mem_write_pc) {
715
                    /* now we have a real cpu fault */
716
                    current_tb = tb_find_pc(env->mem_write_pc);
717
                }
718
            }
719
            if (current_tb == tb &&
720
                !(current_tb->cflags & CF_SINGLE_INSN)) {
721
                /* If we are modifying the current TB, we must stop
722
                its execution. We could be more precise by checking
723
                that the modification is after the current PC, but it
724
                would require a specialized function to partially
725
                restore the CPU state */
726

    
727
                current_tb_modified = 1;
728
                cpu_restore_state(current_tb, env,
729
                                  env->mem_write_pc, NULL);
730
#if defined(TARGET_I386)
731
                current_flags = env->hflags;
732
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
733
                current_cs_base = (target_ulong)env->segs[R_CS].base;
734
                current_pc = current_cs_base + env->eip;
735
#else
736
#error unsupported CPU
737
#endif
738
            }
739
#endif /* TARGET_HAS_PRECISE_SMC */
740
            /* we need to do that to handle the case where a signal
741
               occurs while doing tb_phys_invalidate() */
742
            saved_tb = NULL;
743
            if (env) {
744
                saved_tb = env->current_tb;
745
                env->current_tb = NULL;
746
            }
747
            tb_phys_invalidate(tb, -1);
748
            if (env) {
749
                env->current_tb = saved_tb;
750
                if (env->interrupt_request && env->current_tb)
751
                    cpu_interrupt(env, env->interrupt_request);
752
            }
753
        }
754
        tb = tb_next;
755
    }
756
#if !defined(CONFIG_USER_ONLY)
757
    /* if no code remaining, no need to continue to use slow writes */
758
    if (!p->first_tb) {
759
        invalidate_page_bitmap(p);
760
        if (is_cpu_write_access) {
761
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
762
        }
763
    }
764
#endif
765
#ifdef TARGET_HAS_PRECISE_SMC
766
    if (current_tb_modified) {
767
        /* we generate a block containing just the instruction
768
           modifying the memory. It will ensure that it cannot modify
769
           itself */
770
        env->current_tb = NULL;
771
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
772
                    CF_SINGLE_INSN);
773
        cpu_resume_from_signal(env, NULL);
774
    }
775
#endif
776
}
777

    
778
/* len must be <= 8 and start must be a multiple of len */
779
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
780
{
781
    PageDesc *p;
782
    int offset, b;
783
#if 0
784
    if (1) {
785
        if (loglevel) {
786
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
787
                   cpu_single_env->mem_write_vaddr, len,
788
                   cpu_single_env->eip,
789
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
790
        }
791
    }
792
#endif
793
    p = page_find(start >> TARGET_PAGE_BITS);
794
    if (!p)
795
        return;
796
    if (p->code_bitmap) {
797
        offset = start & ~TARGET_PAGE_MASK;
798
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
799
        if (b & ((1 << len) - 1))
800
            goto do_invalidate;
801
    } else {
802
    do_invalidate:
803
        tb_invalidate_phys_page_range(start, start + len, 1);
804
    }
805
}
806

    
807
#if !defined(CONFIG_SOFTMMU)
808
static void tb_invalidate_phys_page(target_ulong addr,
809
                                    unsigned long pc, void *puc)
810
{
811
    int n, current_flags, current_tb_modified;
812
    target_ulong current_pc, current_cs_base;
813
    PageDesc *p;
814
    TranslationBlock *tb, *current_tb;
815
#ifdef TARGET_HAS_PRECISE_SMC
816
    CPUState *env = cpu_single_env;
817
#endif
818

    
819
    addr &= TARGET_PAGE_MASK;
820
    p = page_find(addr >> TARGET_PAGE_BITS);
821
    if (!p)
822
        return;
823
    tb = p->first_tb;
824
    current_tb_modified = 0;
825
    current_tb = NULL;
826
    current_pc = 0; /* avoid warning */
827
    current_cs_base = 0; /* avoid warning */
828
    current_flags = 0; /* avoid warning */
829
#ifdef TARGET_HAS_PRECISE_SMC
830
    if (tb && pc != 0) {
831
        current_tb = tb_find_pc(pc);
832
    }
833
#endif
834
    while (tb != NULL) {
835
        n = (long)tb & 3;
836
        tb = (TranslationBlock *)((long)tb & ~3);
837
#ifdef TARGET_HAS_PRECISE_SMC
838
        if (current_tb == tb &&
839
            !(current_tb->cflags & CF_SINGLE_INSN)) {
840
                /* If we are modifying the current TB, we must stop
841
                   its execution. We could be more precise by checking
842
                   that the modification is after the current PC, but it
843
                   would require a specialized function to partially
844
                   restore the CPU state */
845

    
846
            current_tb_modified = 1;
847
            cpu_restore_state(current_tb, env, pc, puc);
848
#if defined(TARGET_I386)
849
            current_flags = env->hflags;
850
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
851
            current_cs_base = (target_ulong)env->segs[R_CS].base;
852
            current_pc = current_cs_base + env->eip;
853
#else
854
#error unsupported CPU
855
#endif
856
        }
857
#endif /* TARGET_HAS_PRECISE_SMC */
858
        tb_phys_invalidate(tb, addr);
859
        tb = tb->page_next[n];
860
    }
861
    p->first_tb = NULL;
862
#ifdef TARGET_HAS_PRECISE_SMC
863
    if (current_tb_modified) {
864
        /* we generate a block containing just the instruction
865
           modifying the memory. It will ensure that it cannot modify
866
           itself */
867
        env->current_tb = NULL;
868
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
869
                    CF_SINGLE_INSN);
870
        cpu_resume_from_signal(env, puc);
871
    }
872
#endif
873
}
874
#endif
875

    
876
/* add the tb to the target page and protect the page if necessary */
877
static inline void tb_alloc_page(TranslationBlock *tb,
878
                                 unsigned int n, target_ulong page_addr)
879
{
880
    PageDesc *p;
881
    TranslationBlock *last_first_tb;
882

    
883
    tb->page_addr[n] = page_addr;
884
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
885
    tb->page_next[n] = p->first_tb;
886
    last_first_tb = p->first_tb;
887
    p->first_tb = (TranslationBlock *)((long)tb | n);
888
    invalidate_page_bitmap(p);
889

    
890
#if defined(TARGET_HAS_SMC) || 1
891

    
892
#if defined(CONFIG_USER_ONLY)
893
    if (p->flags & PAGE_WRITE) {
894
        target_ulong addr;
895
        PageDesc *p2;
896
        int prot;
897

    
898
        /* force the host page as non writable (writes will have a
899
           page fault + mprotect overhead) */
900
        page_addr &= qemu_host_page_mask;
901
        prot = 0;
902
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
903
            addr += TARGET_PAGE_SIZE) {
904

    
905
            p2 = page_find (addr >> TARGET_PAGE_BITS);
906
            if (!p2)
907
                continue;
908
            prot |= p2->flags;
909
            p2->flags &= ~PAGE_WRITE;
910
            page_get_flags(addr);
911
          }
912
        mprotect(g2h(page_addr), qemu_host_page_size,
913
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
914
#ifdef DEBUG_TB_INVALIDATE
915
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
916
               page_addr);
917
#endif
918
    }
919
#else
920
    /* if some code is already present, then the pages are already
921
       protected. So we handle the case where only the first TB is
922
       allocated in a physical page */
923
    if (!last_first_tb) {
924
        tlb_protect_code(page_addr);
925
    }
926
#endif
927

    
928
#endif /* TARGET_HAS_SMC */
929
}
930

    
931
/* Allocate a new translation block. Flush the translation buffer if
932
   too many translation blocks or too much generated code. */
933
TranslationBlock *tb_alloc(target_ulong pc)
934
{
935
    TranslationBlock *tb;
936

    
937
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
938
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
939
        return NULL;
940
    tb = &tbs[nb_tbs++];
941
    tb->pc = pc;
942
    tb->cflags = 0;
943
    return tb;
944
}
945

    
946
/* add a new TB and link it to the physical page tables. phys_page2 is
947
   (-1) to indicate that only one page contains the TB. */
948
void tb_link_phys(TranslationBlock *tb,
949
                  target_ulong phys_pc, target_ulong phys_page2)
950
{
951
    unsigned int h;
952
    TranslationBlock **ptb;
953

    
954
    /* add in the physical hash table */
955
    h = tb_phys_hash_func(phys_pc);
956
    ptb = &tb_phys_hash[h];
957
    tb->phys_hash_next = *ptb;
958
    *ptb = tb;
959

    
960
    /* add in the page list */
961
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
962
    if (phys_page2 != -1)
963
        tb_alloc_page(tb, 1, phys_page2);
964
    else
965
        tb->page_addr[1] = -1;
966

    
967
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
968
    tb->jmp_next[0] = NULL;
969
    tb->jmp_next[1] = NULL;
970

    
971
    /* init original jump addresses */
972
    if (tb->tb_next_offset[0] != 0xffff)
973
        tb_reset_jump(tb, 0);
974
    if (tb->tb_next_offset[1] != 0xffff)
975
        tb_reset_jump(tb, 1);
976

    
977
#ifdef DEBUG_TB_CHECK
978
    tb_page_check();
979
#endif
980
}
981

    
982
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
983
   tb[1].tc_ptr. Return NULL if not found */
984
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
985
{
986
    int m_min, m_max, m;
987
    unsigned long v;
988
    TranslationBlock *tb;
989

    
990
    if (nb_tbs <= 0)
991
        return NULL;
992
    if (tc_ptr < (unsigned long)code_gen_buffer ||
993
        tc_ptr >= (unsigned long)code_gen_ptr)
994
        return NULL;
995
    /* binary search (cf Knuth) */
996
    m_min = 0;
997
    m_max = nb_tbs - 1;
998
    while (m_min <= m_max) {
999
        m = (m_min + m_max) >> 1;
1000
        tb = &tbs[m];
1001
        v = (unsigned long)tb->tc_ptr;
1002
        if (v == tc_ptr)
1003
            return tb;
1004
        else if (tc_ptr < v) {
1005
            m_max = m - 1;
1006
        } else {
1007
            m_min = m + 1;
1008
        }
1009
    }
1010
    return &tbs[m_max];
1011
}
1012

    
1013
static void tb_reset_jump_recursive(TranslationBlock *tb);
1014

    
1015
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1016
{
1017
    TranslationBlock *tb1, *tb_next, **ptb;
1018
    unsigned int n1;
1019

    
1020
    tb1 = tb->jmp_next[n];
1021
    if (tb1 != NULL) {
1022
        /* find head of list */
1023
        for(;;) {
1024
            n1 = (long)tb1 & 3;
1025
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1026
            if (n1 == 2)
1027
                break;
1028
            tb1 = tb1->jmp_next[n1];
1029
        }
1030
        /* we are now sure that tb jumps to tb1 */
1031
        tb_next = tb1;
1032

    
1033
        /* remove tb from the jmp_first list */
1034
        ptb = &tb_next->jmp_first;
1035
        for(;;) {
1036
            tb1 = *ptb;
1037
            n1 = (long)tb1 & 3;
1038
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1039
            if (n1 == n && tb1 == tb)
1040
                break;
1041
            ptb = &tb1->jmp_next[n1];
1042
        }
1043
        *ptb = tb->jmp_next[n];
1044
        tb->jmp_next[n] = NULL;
1045

    
1046
        /* suppress the jump to next tb in generated code */
1047
        tb_reset_jump(tb, n);
1048

    
1049
        /* suppress jumps in the tb on which we could have jumped */
1050
        tb_reset_jump_recursive(tb_next);
1051
    }
1052
}
1053

    
1054
static void tb_reset_jump_recursive(TranslationBlock *tb)
1055
{
1056
    tb_reset_jump_recursive2(tb, 0);
1057
    tb_reset_jump_recursive2(tb, 1);
1058
}
1059

    
1060
#if defined(TARGET_HAS_ICE)
1061
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1062
{
1063
    target_phys_addr_t addr;
1064
    target_ulong pd;
1065
    ram_addr_t ram_addr;
1066
    PhysPageDesc *p;
1067

    
1068
    addr = cpu_get_phys_page_debug(env, pc);
1069
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1070
    if (!p) {
1071
        pd = IO_MEM_UNASSIGNED;
1072
    } else {
1073
        pd = p->phys_offset;
1074
    }
1075
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1076
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1077
}
1078
#endif
1079

    
1080
/* Add a watchpoint.  */
1081
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1082
{
1083
    int i;
1084

    
1085
    for (i = 0; i < env->nb_watchpoints; i++) {
1086
        if (addr == env->watchpoint[i].vaddr)
1087
            return 0;
1088
    }
1089
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1090
        return -1;
1091

    
1092
    i = env->nb_watchpoints++;
1093
    env->watchpoint[i].vaddr = addr;
1094
    tlb_flush_page(env, addr);
1095
    /* FIXME: This flush is needed because of the hack to make memory ops
1096
       terminate the TB.  It can be removed once the proper IO trap and
1097
       re-execute bits are in.  */
1098
    tb_flush(env);
1099
    return i;
1100
}
1101

    
1102
/* Remove a watchpoint.  */
1103
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1104
{
1105
    int i;
1106

    
1107
    for (i = 0; i < env->nb_watchpoints; i++) {
1108
        if (addr == env->watchpoint[i].vaddr) {
1109
            env->nb_watchpoints--;
1110
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1111
            tlb_flush_page(env, addr);
1112
            return 0;
1113
        }
1114
    }
1115
    return -1;
1116
}
1117

    
1118
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1119
   breakpoint is reached */
1120
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1121
{
1122
#if defined(TARGET_HAS_ICE)
1123
    int i;
1124

    
1125
    for(i = 0; i < env->nb_breakpoints; i++) {
1126
        if (env->breakpoints[i] == pc)
1127
            return 0;
1128
    }
1129

    
1130
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1131
        return -1;
1132
    env->breakpoints[env->nb_breakpoints++] = pc;
1133

    
1134
    breakpoint_invalidate(env, pc);
1135
    return 0;
1136
#else
1137
    return -1;
1138
#endif
1139
}
1140

    
1141
/* remove a breakpoint */
1142
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1143
{
1144
#if defined(TARGET_HAS_ICE)
1145
    int i;
1146
    for(i = 0; i < env->nb_breakpoints; i++) {
1147
        if (env->breakpoints[i] == pc)
1148
            goto found;
1149
    }
1150
    return -1;
1151
 found:
1152
    env->nb_breakpoints--;
1153
    if (i < env->nb_breakpoints)
1154
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1155

    
1156
    breakpoint_invalidate(env, pc);
1157
    return 0;
1158
#else
1159
    return -1;
1160
#endif
1161
}
1162

    
1163
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1164
   CPU loop after each instruction */
1165
void cpu_single_step(CPUState *env, int enabled)
1166
{
1167
#if defined(TARGET_HAS_ICE)
1168
    if (env->singlestep_enabled != enabled) {
1169
        env->singlestep_enabled = enabled;
1170
        /* must flush all the translated code to avoid inconsistencies */
1171
        /* XXX: only flush what is necessary */
1172
        tb_flush(env);
1173
    }
1174
#endif
1175
}
1176

    
1177
/* enable or disable low level logging */
1178
void cpu_set_log(int log_flags)
1179
{
1180
    loglevel = log_flags;
1181
    if (loglevel && !logfile) {
1182
        logfile = fopen(logfilename, log_append ? "a" : "w");
1183
        if (!logfile) {
1184
            perror(logfilename);
1185
            _exit(1);
1186
        }
1187
#if !defined(CONFIG_SOFTMMU)
1188
        /* we must avoid glibc's use of mmap() by setting a buffer "by hand" */
1189
        {
1190
            static uint8_t logfile_buf[4096];
1191
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1192
        }
1193
#else
1194
        setvbuf(logfile, NULL, _IOLBF, 0);
1195
#endif
1196
        log_append = 1;
1197
    }
1198
    if (!loglevel && logfile) {
1199
        fclose(logfile);
1200
        logfile = NULL;
1201
    }
1202
}
1203

    
1204
void cpu_set_log_filename(const char *filename)
1205
{
1206
    logfilename = strdup(filename);
1207
    if (logfile) {
1208
        fclose(logfile);
1209
        logfile = NULL;
1210
    }
1211
    cpu_set_log(loglevel);
1212
}
1213

    
1214
/* mask must never be zero, except for A20 change call */
1215
void cpu_interrupt(CPUState *env, int mask)
1216
{
1217
    TranslationBlock *tb;
1218
    static int interrupt_lock;
1219

    
1220
    env->interrupt_request |= mask;
1221
    /* if the cpu is currently executing code, we must unlink it and
1222
       all the potentially executing TB */
1223
    tb = env->current_tb;
1224
    if (tb && !testandset(&interrupt_lock)) {
1225
        env->current_tb = NULL;
1226
        tb_reset_jump_recursive(tb);
1227
        interrupt_lock = 0;
1228
    }
1229
}
1230

    
1231
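/* clear the given bits in the pending interrupt request mask */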
void cpu_reset_interrupt(CPUState *env, int mask)
1232
{
1233
    env->interrupt_request &= ~mask;
1234
}
1235

    
1236
CPULogItem cpu_log_items[] = {
1237
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1238
      "show generated host assembly code for each compiled TB" },
1239
    { CPU_LOG_TB_IN_ASM, "in_asm",
1240
      "show target assembly code for each compiled TB" },
1241
    { CPU_LOG_TB_OP, "op",
1242
      "show micro ops for each compiled TB" },
1243
#ifdef TARGET_I386
1244
    { CPU_LOG_TB_OP_OPT, "op_opt",
1245
      "show micro ops before eflags optimization" },
1246
#endif
1247
    { CPU_LOG_INT, "int",
1248
      "show interrupts/exceptions in short format" },
1249
    { CPU_LOG_EXEC, "exec",
1250
      "show trace before each executed TB (lots of logs)" },
1251
    { CPU_LOG_TB_CPU, "cpu",
1252
      "show CPU state before block translation" },
1253
#ifdef TARGET_I386
1254
    { CPU_LOG_PCALL, "pcall",
1255
      "show protected mode far calls/returns/exceptions" },
1256
#endif
1257
#ifdef DEBUG_IOPORT
1258
    { CPU_LOG_IOPORT, "ioport",
1259
      "show all i/o ports accesses" },
1260
#endif
1261
    { 0, NULL, NULL },
1262
};
1263

    
1264
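/* return true if the first 'n' characters of 's1' exactly match the string 's2' */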
static int cmp1(const char *s1, int n, const char *s2)
1265
{
1266
    if (strlen(s2) != n)
1267
        return 0;
1268
    return memcmp(s1, s2, n) == 0;
1269
}
1270

    
1271
/* take a comma separated list of log mask names and return the combined mask, or 0 on error */
1272
int cpu_str_to_log_mask(const char *str)
1273
{
1274
    CPULogItem *item;
1275
    int mask;
1276
    const char *p, *p1;
1277

    
1278
    p = str;
1279
    mask = 0;
1280
    for(;;) {
1281
        p1 = strchr(p, ',');
1282
        if (!p1)
1283
            p1 = p + strlen(p);
1284
        if (cmp1(p, p1 - p, "all")) {
1285
            for(item = cpu_log_items; item->mask != 0; item++) {
1286
                mask |= item->mask;
1287
            }
1288
        } else {
1289
            for(item = cpu_log_items; item->mask != 0; item++) {
1290
                if (cmp1(p, p1 - p, item->name))
1291
                    goto found;
1292
            }
1293
            return 0;
1294
        }
1295
    found:
1296
        mask |= item->mask;
1297
        if (*p1 != ',')
1298
            break;
1299
        p = p1 + 1;
1300
    }
1301
    return mask;
1302
}
1303

    
1304
void cpu_abort(CPUState *env, const char *fmt, ...)
1305
{
1306
    va_list ap;
1307
    va_list ap2;
1308

    
1309
    va_start(ap, fmt);
1310
    va_copy(ap2, ap);
1311
    fprintf(stderr, "qemu: fatal: ");
1312
    vfprintf(stderr, fmt, ap);
1313
    fprintf(stderr, "\n");
1314
#ifdef TARGET_I386
1315
    if(env->intercept & INTERCEPT_SVM_MASK) {
1316
        /* most probably the virtual machine should not
1317
           be shut down but rather caught by the VMM */
1318
        vmexit(SVM_EXIT_SHUTDOWN, 0);
1319
    }
1320
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1321
#else
1322
    cpu_dump_state(env, stderr, fprintf, 0);
1323
#endif
1324
    if (logfile) {
1325
        fprintf(logfile, "qemu: fatal: ");
1326
        vfprintf(logfile, fmt, ap2);
1327
        fprintf(logfile, "\n");
1328
#ifdef TARGET_I386
1329
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1330
#else
1331
        cpu_dump_state(env, logfile, fprintf, 0);
1332
#endif
1333
        fflush(logfile);
1334
        fclose(logfile);
1335
    }
1336
    va_end(ap2);
1337
    va_end(ap);
1338
    abort();
1339
}
1340

    
1341
CPUState *cpu_copy(CPUState *env)
1342
{
1343
    CPUState *new_env = cpu_init(env->cpu_model_str);
1344
    /* preserve chaining and index */
1345
    CPUState *next_cpu = new_env->next_cpu;
1346
    int cpu_index = new_env->cpu_index;
1347
    memcpy(new_env, env, sizeof(CPUState));
1348
    new_env->next_cpu = next_cpu;
1349
    new_env->cpu_index = cpu_index;
1350
    return new_env;
1351
}
1352

    
1353
#if !defined(CONFIG_USER_ONLY)
1354

    
1355
/* NOTE: if flush_global is true, also flush global entries (not
1356
   implemented yet) */
1357
void tlb_flush(CPUState *env, int flush_global)
1358
{
1359
    int i;
1360

    
1361
#if defined(DEBUG_TLB)
1362
    printf("tlb_flush:\n");
1363
#endif
1364
    /* must reset current TB so that interrupts cannot modify the
1365
       links while we are modifying them */
1366
    env->current_tb = NULL;
1367

    
1368
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1369
        env->tlb_table[0][i].addr_read = -1;
1370
        env->tlb_table[0][i].addr_write = -1;
1371
        env->tlb_table[0][i].addr_code = -1;
1372
        env->tlb_table[1][i].addr_read = -1;
1373
        env->tlb_table[1][i].addr_write = -1;
1374
        env->tlb_table[1][i].addr_code = -1;
1375
#if (NB_MMU_MODES >= 3)
1376
        env->tlb_table[2][i].addr_read = -1;
1377
        env->tlb_table[2][i].addr_write = -1;
1378
        env->tlb_table[2][i].addr_code = -1;
1379
#if (NB_MMU_MODES == 4)
1380
        env->tlb_table[3][i].addr_read = -1;
1381
        env->tlb_table[3][i].addr_write = -1;
1382
        env->tlb_table[3][i].addr_code = -1;
1383
#endif
1384
#endif
1385
    }
1386

    
1387
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1388

    
1389
#if !defined(CONFIG_SOFTMMU)
1390
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1391
#endif
1392
#ifdef USE_KQEMU
1393
    if (env->kqemu_enabled) {
1394
        kqemu_flush(env, flush_global);
1395
    }
1396
#endif
1397
    tlb_flush_count++;
1398
}
1399

    
1400
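/* invalidate a single TLB entry if any of its read, write or code addresses
   match 'addr' */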
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1401
{
1402
    if (addr == (tlb_entry->addr_read &
1403
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1404
        addr == (tlb_entry->addr_write &
1405
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1406
        addr == (tlb_entry->addr_code &
1407
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1408
        tlb_entry->addr_read = -1;
1409
        tlb_entry->addr_write = -1;
1410
        tlb_entry->addr_code = -1;
1411
    }
1412
}
1413

    
1414
void tlb_flush_page(CPUState *env, target_ulong addr)
1415
{
1416
    int i;
1417
    TranslationBlock *tb;
1418

    
1419
#if defined(DEBUG_TLB)
1420
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1421
#endif
1422
    /* must reset current TB so that interrupts cannot modify the
1423
       links while we are modifying them */
1424
    env->current_tb = NULL;
1425

    
1426
    addr &= TARGET_PAGE_MASK;
1427
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1428
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1429
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1430
#if (NB_MMU_MODES >= 3)
1431
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1432
#if (NB_MMU_MODES == 4)
1433
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1434
#endif
1435
#endif
1436

    
1437
    /* Discard jump cache entries for any tb which might potentially
1438
       overlap the flushed page.  */
1439
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1440
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1441

    
1442
    i = tb_jmp_cache_hash_page(addr);
1443
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1444

    
1445
#if !defined(CONFIG_SOFTMMU)
1446
    if (addr < MMAP_AREA_END)
1447
        munmap((void *)addr, TARGET_PAGE_SIZE);
1448
#endif
1449
#ifdef USE_KQEMU
1450
    if (env->kqemu_enabled) {
1451
        kqemu_flush_page(env, addr);
1452
    }
1453
#endif
1454
}
1455

    
1456
/* update the TLBs so that writes to code in the physical page 'ram_addr'
1457
   can be detected */
1458
static void tlb_protect_code(ram_addr_t ram_addr)
1459
{
1460
    cpu_physical_memory_reset_dirty(ram_addr,
1461
                                    ram_addr + TARGET_PAGE_SIZE,
1462
                                    CODE_DIRTY_FLAG);
1463
}
1464

    
1465
/* update the TLB so that writes in physical page 'ram_addr' are no longer
1466
   tested for self modifying code */
1467
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1468
                                    target_ulong vaddr)
1469
{
1470
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1471
}
1472

    
1473
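/* if the TLB entry maps RAM inside [start, start + length), redirect writes
   through the NOTDIRTY slow path so that dirty bits are set again */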
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1474
                                         unsigned long start, unsigned long length)
1475
{
1476
    unsigned long addr;
1477
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1478
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1479
        if ((addr - start) < length) {
1480
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1481
        }
1482
    }
1483
}
1484

    
1485
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1486
                                     int dirty_flags)
1487
{
1488
    CPUState *env;
1489
    unsigned long length, start1;
1490
    int i, mask, len;
1491
    uint8_t *p;
1492

    
1493
    start &= TARGET_PAGE_MASK;
1494
    end = TARGET_PAGE_ALIGN(end);
1495

    
1496
    length = end - start;
1497
    if (length == 0)
1498
        return;
1499
    len = length >> TARGET_PAGE_BITS;
1500
#ifdef USE_KQEMU
1501
    /* XXX: should not depend on cpu context */
1502
    env = first_cpu;
1503
    if (env->kqemu_enabled) {
1504
        ram_addr_t addr;
1505
        addr = start;
1506
        for(i = 0; i < len; i++) {
1507
            kqemu_set_notdirty(env, addr);
1508
            addr += TARGET_PAGE_SIZE;
1509
        }
1510
    }
1511
#endif
1512
    mask = ~dirty_flags;
1513
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1514
    for(i = 0; i < len; i++)
1515
        p[i] &= mask;
1516

    
1517
    /* we modify the TLB cache so that the dirty bit will be set again
1518
       when accessing the range */
1519
    start1 = start + (unsigned long)phys_ram_base;
1520
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1521
        for(i = 0; i < CPU_TLB_SIZE; i++)
1522
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1523
        for(i = 0; i < CPU_TLB_SIZE; i++)
1524
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1525
#if (NB_MMU_MODES >= 3)
1526
        for(i = 0; i < CPU_TLB_SIZE; i++)
1527
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1528
#if (NB_MMU_MODES == 4)
1529
        for(i = 0; i < CPU_TLB_SIZE; i++)
1530
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1531
#endif
1532
#endif
1533
    }
1534

    
1535
#if !defined(CONFIG_SOFTMMU)
1536
    /* XXX: this is expensive */
1537
    {
1538
        VirtPageDesc *p;
1539
        int j;
1540
        target_ulong addr;
1541

    
1542
        for(i = 0; i < L1_SIZE; i++) {
1543
            p = l1_virt_map[i];
1544
            if (p) {
1545
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1546
                for(j = 0; j < L2_SIZE; j++) {
1547
                    if (p->valid_tag == virt_valid_tag &&
1548
                        p->phys_addr >= start && p->phys_addr < end &&
1549
                        (p->prot & PROT_WRITE)) {
1550
                        if (addr < MMAP_AREA_END) {
1551
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
1552
                                     p->prot & ~PROT_WRITE);
1553
                        }
1554
                    }
1555
                    addr += TARGET_PAGE_SIZE;
1556
                    p++;
1557
                }
1558
            }
1559
        }
1560
    }
1561
#endif
1562
}
1563

    
1564
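/* re-arm the NOTDIRTY slow path for a RAM TLB entry whose page is not
   currently marked dirty */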
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1565
{
1566
    ram_addr_t ram_addr;
1567

    
1568
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1569
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1570
            tlb_entry->addend - (unsigned long)phys_ram_base;
1571
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1572
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1573
        }
1574
    }
1575
}
1576

    
1577
/* update the TLB according to the current state of the dirty bits */
1578
void cpu_tlb_update_dirty(CPUState *env)
1579
{
1580
    int i;
1581
    for(i = 0; i < CPU_TLB_SIZE; i++)
1582
        tlb_update_dirty(&env->tlb_table[0][i]);
1583
    for(i = 0; i < CPU_TLB_SIZE; i++)
1584
        tlb_update_dirty(&env->tlb_table[1][i]);
1585
#if (NB_MMU_MODES >= 3)
1586
    for(i = 0; i < CPU_TLB_SIZE; i++)
1587
        tlb_update_dirty(&env->tlb_table[2][i]);
1588
#if (NB_MMU_MODES == 4)
1589
    for(i = 0; i < CPU_TLB_SIZE; i++)
1590
        tlb_update_dirty(&env->tlb_table[3][i]);
1591
#endif
1592
#endif
1593
}
1594

    
1595
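/* switch a matching NOTDIRTY write entry back to direct RAM access once its
   page has been marked dirty */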
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1596
                                  unsigned long start)
1597
{
1598
    unsigned long addr;
1599
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1600
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1601
        if (addr == start) {
1602
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1603
        }
1604
    }
1605
}
1606

    
1607
/* update the TLB corresponding to virtual page vaddr and phys addr
1608
   addr now that the corresponding page has been marked dirty */
1609
static inline void tlb_set_dirty(CPUState *env,
1610
                                 unsigned long addr, target_ulong vaddr)
1611
{
1612
    int i;
1613

    
1614
    addr &= TARGET_PAGE_MASK;
1615
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1616
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1617
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1618
#if (NB_MMU_MODES >= 3)
1619
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1620
#if (NB_MMU_MODES == 4)
1621
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1622
#endif
1623
#endif
1624
}
1625

    
1626
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

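/* return the flags of the page containing 'address', or 0 if the page
   is not mapped */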
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

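/* check that the guest range [start, start+len) is mapped with the
   requested flags; pages that were made read-only because they contain
   translated code are unprotected on demand. Returns 0 on success, -1
   otherwise. */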
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
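/* Illustrative usage (not part of this file): a board init function
   typically allocates guest RAM and maps it at guest physical address 0,
   e.g.

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);

   where 'ram_size' is a hypothetical board parameter. */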
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

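/* not implemented yet: memory obtained with qemu_ram_alloc() is never
   released */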
void qemu_ram_free(ram_addr_t addr)
{
}

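/* default handlers for unassigned physical memory: reads return 0 and
   writes are ignored, except on targets that signal an unassigned
   access fault (sparc, cris) */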
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

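/* Write handlers installed for RAM pages that may still contain
   translated code: they invalidate the affected TBs, perform the store,
   then update the dirty flags so that later writes can go straight to
   RAM. */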
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

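/* Subpage support: when several memory regions share a single target
   page, the accessors below dispatch each access to the handler
   registered for its offset within the page. */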
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

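/* register the fixed I/O slots (ROM, unassigned, notdirty) and the
   watchpoint handler, then allocate the dirty bitmap */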
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is a positive value, the corresponding io zone is
   modified. If it is zero or negative, a new io zone is allocated. The
   return value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
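/* Illustrative usage (not from this file): a device model typically does

       io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(mydev_base, 0x1000, io);

   where mydev_read/mydev_write are hypothetical arrays of byte, word and
   dword handlers and mydev_state is passed back as the 'opaque' argument. */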
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

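/* accessors for the handler tables registered with
   cpu_register_io_memory(); io_index is the value it returned */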
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

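/* same as stl_phys_notdirty, but for a 64 bit value */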
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

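/* dump statistics about the translation buffer and the dynamic code
   generator (used by the monitor 'info jit' command) */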
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op           %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte     %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif