Statistics
| Branch: | Revision:

root / exec.c @ 03875444

History | View | Annotate | Download (90.8 kB)

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#if defined(CONFIG_USER_ONLY)
40
#include <qemu.h>
41
#endif
42

    
43
//#define DEBUG_TB_INVALIDATE
44
//#define DEBUG_FLUSH
45
//#define DEBUG_TLB
46
//#define DEBUG_UNASSIGNED
47

    
48
/* make various TB consistency checks */
49
//#define DEBUG_TB_CHECK
50
//#define DEBUG_TLB_CHECK
51

    
52
//#define DEBUG_IOPORT
53
//#define DEBUG_SUBPAGE
54

    
55
#if !defined(CONFIG_USER_ONLY)
56
/* TB consistency checks only implemented for usermode emulation.  */
57
#undef DEBUG_TB_CHECK
58
#endif
59

    
60
/* threshold to flush the translated code buffer */
61
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#define MMAP_AREA_START        0x00000000
66
#define MMAP_AREA_END          0xa8000000
67

    
68
#if defined(TARGET_SPARC64)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 41
70
#elif defined(TARGET_SPARC)
71
#define TARGET_PHYS_ADDR_SPACE_BITS 36
72
#elif defined(TARGET_ALPHA)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#define TARGET_VIRT_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_PPC64)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 42
77
#else
78
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
79
#define TARGET_PHYS_ADDR_SPACE_BITS 32
80
#endif
81

    
82
TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
83
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84
int nb_tbs;
85
/* any access to the tbs or the page table must use this lock */
86
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87

    
88
uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
89
uint8_t *code_gen_ptr;
90

    
91
int phys_ram_size;
92
int phys_ram_fd;
93
uint8_t *phys_ram_base;
94
uint8_t *phys_ram_dirty;
95
static ram_addr_t phys_ram_alloc_offset = 0;
96

    
97
CPUState *first_cpu;
98
/* current CPU in the current thread. It is only valid inside
99
   cpu_exec() */
100
CPUState *cpu_single_env;
101

    
102
typedef struct PageDesc {
103
    /* list of TBs intersecting this ram page */
104
    TranslationBlock *first_tb;
105
    /* in order to optimize self modifying code, we count the number
106
       of lookups we do to a given page to use a bitmap */
107
    unsigned int code_write_count;
108
    uint8_t *code_bitmap;
109
#if defined(CONFIG_USER_ONLY)
110
    unsigned long flags;
111
#endif
112
} PageDesc;
113

    
114
typedef struct PhysPageDesc {
115
    /* offset in host memory of the page + io_index in the low 12 bits */
116
    uint32_t phys_offset;
117
} PhysPageDesc;
118

    
119
#define L2_BITS 10
120
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
121
/* XXX: this is a temporary hack for alpha target.
122
 *      In the future, this is to be replaced by a multi-level table
123
 *      to actually be able to handle the complete 64 bits address space.
124
 */
125
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
126
#else
127
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
128
#endif
129

    
130
#define L1_SIZE (1 << L1_BITS)
131
#define L2_SIZE (1 << L2_BITS)
132

    
133
static void io_mem_init(void);
134

    
135
unsigned long qemu_real_host_page_size;
136
unsigned long qemu_host_page_bits;
137
unsigned long qemu_host_page_size;
138
unsigned long qemu_host_page_mask;
139

    
140
/* XXX: for system emulation, it could just be an array */
141
static PageDesc *l1_map[L1_SIZE];
142
PhysPageDesc **l1_phys_map;
143

    
144
/* io memory support */
145
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
146
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
147
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
148
static int io_mem_nb;
149
#if defined(CONFIG_SOFTMMU)
150
static int io_mem_watch;
151
#endif
152

    
153
/* log support */
154
char *logfilename = "/tmp/qemu.log";
155
FILE *logfile;
156
int loglevel;
157
static int log_append = 0;
158

    
159
/* statistics */
160
static int tlb_flush_count;
161
static int tb_flush_count;
162
static int tb_phys_invalidate_count;
163

    
164
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
165
typedef struct subpage_t {
166
    target_phys_addr_t base;
167
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
168
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
169
    void *opaque[TARGET_PAGE_SIZE][2][4];
170
} subpage_t;
171

    
172
static void page_init(void)
173
{
174
    /* NOTE: we can always suppose that qemu_host_page_size >=
175
       TARGET_PAGE_SIZE */
176
#ifdef _WIN32
177
    {
178
        SYSTEM_INFO system_info;
179
        DWORD old_protect;
180

    
181
        GetSystemInfo(&system_info);
182
        qemu_real_host_page_size = system_info.dwPageSize;
183

    
184
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
185
                       PAGE_EXECUTE_READWRITE, &old_protect);
186
    }
187
#else
188
    qemu_real_host_page_size = getpagesize();
189
    {
190
        unsigned long start, end;
191

    
192
        start = (unsigned long)code_gen_buffer;
193
        start &= ~(qemu_real_host_page_size - 1);
194

    
195
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
196
        end += qemu_real_host_page_size - 1;
197
        end &= ~(qemu_real_host_page_size - 1);
198

    
199
        mprotect((void *)start, end - start,
200
                 PROT_READ | PROT_WRITE | PROT_EXEC);
201
    }
202
#endif
203

    
204
    if (qemu_host_page_size == 0)
205
        qemu_host_page_size = qemu_real_host_page_size;
206
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
207
        qemu_host_page_size = TARGET_PAGE_SIZE;
208
    qemu_host_page_bits = 0;
209
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
210
        qemu_host_page_bits++;
211
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
212
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
213
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
214

    
215
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
216
    {
217
        long long startaddr, endaddr;
218
        FILE *f;
219
        int n;
220

    
221
        f = fopen("/proc/self/maps", "r");
222
        if (f) {
223
            do {
224
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
225
                if (n == 2) {
226
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
227
                                   TARGET_PAGE_ALIGN(endaddr),
228
                                   PAGE_RESERVED); 
229
                }
230
            } while (!feof(f));
231
            fclose(f);
232
        }
233
    }
234
#endif
235
}
236

    
237
static inline PageDesc *page_find_alloc(unsigned int index)
238
{
239
    PageDesc **lp, *p;
240

    
241
    lp = &l1_map[index >> L2_BITS];
242
    p = *lp;
243
    if (!p) {
244
        /* allocate if not found */
245
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
246
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
247
        *lp = p;
248
    }
249
    return p + (index & (L2_SIZE - 1));
250
}
251

    
252
static inline PageDesc *page_find(unsigned int index)
253
{
254
    PageDesc *p;
255

    
256
    p = l1_map[index >> L2_BITS];
257
    if (!p)
258
        return 0;
259
    return p + (index & (L2_SIZE - 1));
260
}
261

    
262
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
263
{
264
    void **lp, **p;
265
    PhysPageDesc *pd;
266

    
267
    p = (void **)l1_phys_map;
268
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
269

    
270
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
271
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
272
#endif
273
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
274
    p = *lp;
275
    if (!p) {
276
        /* allocate if not found */
277
        if (!alloc)
278
            return NULL;
279
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
280
        memset(p, 0, sizeof(void *) * L1_SIZE);
281
        *lp = p;
282
    }
283
#endif
284
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
285
    pd = *lp;
286
    if (!pd) {
287
        int i;
288
        /* allocate if not found */
289
        if (!alloc)
290
            return NULL;
291
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
292
        *lp = pd;
293
        for (i = 0; i < L2_SIZE; i++)
294
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
295
    }
296
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
297
}
298

    
299
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
300
{
301
    return phys_page_find_alloc(index, 0);
302
}
303

    
304
#if !defined(CONFIG_USER_ONLY)
305
static void tlb_protect_code(ram_addr_t ram_addr);
306
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
307
                                    target_ulong vaddr);
308
#endif
309

    
310
void cpu_exec_init(CPUState *env)
311
{
312
    CPUState **penv;
313
    int cpu_index;
314

    
315
    if (!code_gen_ptr) {
316
        cpu_gen_init();
317
        code_gen_ptr = code_gen_buffer;
318
        page_init();
319
        io_mem_init();
320
    }
321
    env->next_cpu = NULL;
322
    penv = &first_cpu;
323
    cpu_index = 0;
324
    while (*penv != NULL) {
325
        penv = (CPUState **)&(*penv)->next_cpu;
326
        cpu_index++;
327
    }
328
    env->cpu_index = cpu_index;
329
    env->nb_watchpoints = 0;
330
    *penv = env;
331
}
332

    
333
static inline void invalidate_page_bitmap(PageDesc *p)
334
{
335
    if (p->code_bitmap) {
336
        qemu_free(p->code_bitmap);
337
        p->code_bitmap = NULL;
338
    }
339
    p->code_write_count = 0;
340
}
341

    
342
/* set to NULL all the 'first_tb' fields in all PageDescs */
343
static void page_flush_tb(void)
344
{
345
    int i, j;
346
    PageDesc *p;
347

    
348
    for(i = 0; i < L1_SIZE; i++) {
349
        p = l1_map[i];
350
        if (p) {
351
            for(j = 0; j < L2_SIZE; j++) {
352
                p->first_tb = NULL;
353
                invalidate_page_bitmap(p);
354
                p++;
355
            }
356
        }
357
    }
358
}
359

    
360
/* flush all the translation blocks */
361
/* XXX: tb_flush is currently not thread safe */
362
void tb_flush(CPUState *env1)
363
{
364
    CPUState *env;
365
#if defined(DEBUG_FLUSH)
366
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
367
           (unsigned long)(code_gen_ptr - code_gen_buffer),
368
           nb_tbs, nb_tbs > 0 ?
369
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
370
#endif
371
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
372
        cpu_abort(env1, "Internal error: code buffer overflow\n");
373

    
374
    nb_tbs = 0;
375

    
376
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
377
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
378
    }
379

    
380
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
381
    page_flush_tb();
382

    
383
    code_gen_ptr = code_gen_buffer;
384
    /* XXX: flush processor icache at this point if cache flush is
385
       expensive */
386
    tb_flush_count++;
387
}
388

    
389
#ifdef DEBUG_TB_CHECK
390

    
391
static void tb_invalidate_check(target_ulong address)
392
{
393
    TranslationBlock *tb;
394
    int i;
395
    address &= TARGET_PAGE_MASK;
396
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
397
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
398
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
399
                  address >= tb->pc + tb->size)) {
400
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
401
                       address, (long)tb->pc, tb->size);
402
            }
403
        }
404
    }
405
}
406

    
407
/* verify that all the pages have correct rights for code */
408
static void tb_page_check(void)
409
{
410
    TranslationBlock *tb;
411
    int i, flags1, flags2;
412

    
413
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
414
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
415
            flags1 = page_get_flags(tb->pc);
416
            flags2 = page_get_flags(tb->pc + tb->size - 1);
417
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
418
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
419
                       (long)tb->pc, tb->size, flags1, flags2);
420
            }
421
        }
422
    }
423
}
424

    
425
void tb_jmp_check(TranslationBlock *tb)
426
{
427
    TranslationBlock *tb1;
428
    unsigned int n1;
429

    
430
    /* suppress any remaining jumps to this TB */
431
    tb1 = tb->jmp_first;
432
    for(;;) {
433
        n1 = (long)tb1 & 3;
434
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
435
        if (n1 == 2)
436
            break;
437
        tb1 = tb1->jmp_next[n1];
438
    }
439
    /* check end of list */
440
    if (tb1 != tb) {
441
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
442
    }
443
}
444

    
445
#endif
446

    
447
/* invalidate one TB */
448
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
449
                             int next_offset)
450
{
451
    TranslationBlock *tb1;
452
    for(;;) {
453
        tb1 = *ptb;
454
        if (tb1 == tb) {
455
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
456
            break;
457
        }
458
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
459
    }
460
}
461

    
462
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
463
{
464
    TranslationBlock *tb1;
465
    unsigned int n1;
466

    
467
    for(;;) {
468
        tb1 = *ptb;
469
        n1 = (long)tb1 & 3;
470
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
471
        if (tb1 == tb) {
472
            *ptb = tb1->page_next[n1];
473
            break;
474
        }
475
        ptb = &tb1->page_next[n1];
476
    }
477
}
478

    
479
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
480
{
481
    TranslationBlock *tb1, **ptb;
482
    unsigned int n1;
483

    
484
    ptb = &tb->jmp_next[n];
485
    tb1 = *ptb;
486
    if (tb1) {
487
        /* find tb(n) in circular list */
488
        for(;;) {
489
            tb1 = *ptb;
490
            n1 = (long)tb1 & 3;
491
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
492
            if (n1 == n && tb1 == tb)
493
                break;
494
            if (n1 == 2) {
495
                ptb = &tb1->jmp_first;
496
            } else {
497
                ptb = &tb1->jmp_next[n1];
498
            }
499
        }
500
        /* now we can suppress tb(n) from the list */
501
        *ptb = tb->jmp_next[n];
502

    
503
        tb->jmp_next[n] = NULL;
504
    }
505
}
506

    
507
/* reset the jump entry 'n' of a TB so that it is not chained to
508
   another TB */
509
static inline void tb_reset_jump(TranslationBlock *tb, int n)
510
{
511
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
512
}
513

    
514
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
515
{
516
    CPUState *env;
517
    PageDesc *p;
518
    unsigned int h, n1;
519
    target_ulong phys_pc;
520
    TranslationBlock *tb1, *tb2;
521

    
522
    /* remove the TB from the hash list */
523
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
524
    h = tb_phys_hash_func(phys_pc);
525
    tb_remove(&tb_phys_hash[h], tb,
526
              offsetof(TranslationBlock, phys_hash_next));
527

    
528
    /* remove the TB from the page list */
529
    if (tb->page_addr[0] != page_addr) {
530
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
531
        tb_page_remove(&p->first_tb, tb);
532
        invalidate_page_bitmap(p);
533
    }
534
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
535
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
536
        tb_page_remove(&p->first_tb, tb);
537
        invalidate_page_bitmap(p);
538
    }
539

    
540
    tb_invalidated_flag = 1;
541

    
542
    /* remove the TB from the hash list */
543
    h = tb_jmp_cache_hash_func(tb->pc);
544
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
545
        if (env->tb_jmp_cache[h] == tb)
546
            env->tb_jmp_cache[h] = NULL;
547
    }
548

    
549
    /* suppress this TB from the two jump lists */
550
    tb_jmp_remove(tb, 0);
551
    tb_jmp_remove(tb, 1);
552

    
553
    /* suppress any remaining jumps to this TB */
554
    tb1 = tb->jmp_first;
555
    for(;;) {
556
        n1 = (long)tb1 & 3;
557
        if (n1 == 2)
558
            break;
559
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
560
        tb2 = tb1->jmp_next[n1];
561
        tb_reset_jump(tb1, n1);
562
        tb1->jmp_next[n1] = NULL;
563
        tb1 = tb2;
564
    }
565
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
566

    
567
    tb_phys_invalidate_count++;
568
}
569

    
570
static inline void set_bits(uint8_t *tab, int start, int len)
571
{
572
    int end, mask, end1;
573

    
574
    end = start + len;
575
    tab += start >> 3;
576
    mask = 0xff << (start & 7);
577
    if ((start & ~7) == (end & ~7)) {
578
        if (start < end) {
579
            mask &= ~(0xff << (end & 7));
580
            *tab |= mask;
581
        }
582
    } else {
583
        *tab++ |= mask;
584
        start = (start + 8) & ~7;
585
        end1 = end & ~7;
586
        while (start < end1) {
587
            *tab++ = 0xff;
588
            start += 8;
589
        }
590
        if (start < end) {
591
            mask = ~(0xff << (end & 7));
592
            *tab |= mask;
593
        }
594
    }
595
}
596

    
597
static void build_page_bitmap(PageDesc *p)
598
{
599
    int n, tb_start, tb_end;
600
    TranslationBlock *tb;
601

    
602
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
603
    if (!p->code_bitmap)
604
        return;
605
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
606

    
607
    tb = p->first_tb;
608
    while (tb != NULL) {
609
        n = (long)tb & 3;
610
        tb = (TranslationBlock *)((long)tb & ~3);
611
        /* NOTE: this is subtle as a TB may span two physical pages */
612
        if (n == 0) {
613
            /* NOTE: tb_end may be after the end of the page, but
614
               it is not a problem */
615
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
616
            tb_end = tb_start + tb->size;
617
            if (tb_end > TARGET_PAGE_SIZE)
618
                tb_end = TARGET_PAGE_SIZE;
619
        } else {
620
            tb_start = 0;
621
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
622
        }
623
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
624
        tb = tb->page_next[n];
625
    }
626
}
627

    
628
#ifdef TARGET_HAS_PRECISE_SMC
629

    
630
static void tb_gen_code(CPUState *env,
631
                        target_ulong pc, target_ulong cs_base, int flags,
632
                        int cflags)
633
{
634
    TranslationBlock *tb;
635
    uint8_t *tc_ptr;
636
    target_ulong phys_pc, phys_page2, virt_page2;
637
    int code_gen_size;
638

    
639
    phys_pc = get_phys_addr_code(env, pc);
640
    tb = tb_alloc(pc);
641
    if (!tb) {
642
        /* flush must be done */
643
        tb_flush(env);
644
        /* cannot fail at this point */
645
        tb = tb_alloc(pc);
646
    }
647
    tc_ptr = code_gen_ptr;
648
    tb->tc_ptr = tc_ptr;
649
    tb->cs_base = cs_base;
650
    tb->flags = flags;
651
    tb->cflags = cflags;
652
    cpu_gen_code(env, tb, &code_gen_size);
653
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
654

    
655
    /* check next page if needed */
656
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
657
    phys_page2 = -1;
658
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
659
        phys_page2 = get_phys_addr_code(env, virt_page2);
660
    }
661
    tb_link_phys(tb, phys_pc, phys_page2);
662
}
663
#endif
664

    
665
/* invalidate all TBs which intersect with the target physical page
666
   starting in range [start;end[. NOTE: start and end must refer to
667
   the same physical page. 'is_cpu_write_access' should be true if called
668
   from a real cpu write access: the virtual CPU will exit the current
669
   TB if code is modified inside this TB. */
670
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
671
                                   int is_cpu_write_access)
672
{
673
    int n, current_tb_modified, current_tb_not_found, current_flags;
674
    CPUState *env = cpu_single_env;
675
    PageDesc *p;
676
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
677
    target_ulong tb_start, tb_end;
678
    target_ulong current_pc, current_cs_base;
679

    
680
    p = page_find(start >> TARGET_PAGE_BITS);
681
    if (!p)
682
        return;
683
    if (!p->code_bitmap &&
684
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
685
        is_cpu_write_access) {
686
        /* build code bitmap */
687
        build_page_bitmap(p);
688
    }
689

    
690
    /* we remove all the TBs in the range [start, end[ */
691
    /* XXX: see if in some cases it could be faster to invalidate all the code */
692
    current_tb_not_found = is_cpu_write_access;
693
    current_tb_modified = 0;
694
    current_tb = NULL; /* avoid warning */
695
    current_pc = 0; /* avoid warning */
696
    current_cs_base = 0; /* avoid warning */
697
    current_flags = 0; /* avoid warning */
698
    tb = p->first_tb;
699
    while (tb != NULL) {
700
        n = (long)tb & 3;
701
        tb = (TranslationBlock *)((long)tb & ~3);
702
        tb_next = tb->page_next[n];
703
        /* NOTE: this is subtle as a TB may span two physical pages */
704
        if (n == 0) {
705
            /* NOTE: tb_end may be after the end of the page, but
706
               it is not a problem */
707
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
708
            tb_end = tb_start + tb->size;
709
        } else {
710
            tb_start = tb->page_addr[1];
711
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
712
        }
713
        if (!(tb_end <= start || tb_start >= end)) {
714
#ifdef TARGET_HAS_PRECISE_SMC
715
            if (current_tb_not_found) {
716
                current_tb_not_found = 0;
717
                current_tb = NULL;
718
                if (env->mem_write_pc) {
719
                    /* now we have a real cpu fault */
720
                    current_tb = tb_find_pc(env->mem_write_pc);
721
                }
722
            }
723
            if (current_tb == tb &&
724
                !(current_tb->cflags & CF_SINGLE_INSN)) {
725
                /* If we are modifying the current TB, we must stop
726
                its execution. We could be more precise by checking
727
                that the modification is after the current PC, but it
728
                would require a specialized function to partially
729
                restore the CPU state */
730

    
731
                current_tb_modified = 1;
732
                cpu_restore_state(current_tb, env,
733
                                  env->mem_write_pc, NULL);
734
#if defined(TARGET_I386)
735
                current_flags = env->hflags;
736
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
737
                current_cs_base = (target_ulong)env->segs[R_CS].base;
738
                current_pc = current_cs_base + env->eip;
739
#else
740
#error unsupported CPU
741
#endif
742
            }
743
#endif /* TARGET_HAS_PRECISE_SMC */
744
            /* we need to do that to handle the case where a signal
745
               occurs while doing tb_phys_invalidate() */
746
            saved_tb = NULL;
747
            if (env) {
748
                saved_tb = env->current_tb;
749
                env->current_tb = NULL;
750
            }
751
            tb_phys_invalidate(tb, -1);
752
            if (env) {
753
                env->current_tb = saved_tb;
754
                if (env->interrupt_request && env->current_tb)
755
                    cpu_interrupt(env, env->interrupt_request);
756
            }
757
        }
758
        tb = tb_next;
759
    }
760
#if !defined(CONFIG_USER_ONLY)
761
    /* if no code remaining, no need to continue to use slow writes */
762
    if (!p->first_tb) {
763
        invalidate_page_bitmap(p);
764
        if (is_cpu_write_access) {
765
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
766
        }
767
    }
768
#endif
769
#ifdef TARGET_HAS_PRECISE_SMC
770
    if (current_tb_modified) {
771
        /* we generate a block containing just the instruction
772
           modifying the memory. It will ensure that it cannot modify
773
           itself */
774
        env->current_tb = NULL;
775
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
776
                    CF_SINGLE_INSN);
777
        cpu_resume_from_signal(env, NULL);
778
    }
779
#endif
780
}
781

    
782
/* len must be <= 8 and start must be a multiple of len */
783
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
784
{
785
    PageDesc *p;
786
    int offset, b;
787
#if 0
788
    if (1) {
789
        if (loglevel) {
790
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
791
                   cpu_single_env->mem_write_vaddr, len,
792
                   cpu_single_env->eip,
793
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
794
        }
795
    }
796
#endif
797
    p = page_find(start >> TARGET_PAGE_BITS);
798
    if (!p)
799
        return;
800
    if (p->code_bitmap) {
801
        offset = start & ~TARGET_PAGE_MASK;
802
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
803
        if (b & ((1 << len) - 1))
804
            goto do_invalidate;
805
    } else {
806
    do_invalidate:
807
        tb_invalidate_phys_page_range(start, start + len, 1);
808
    }
809
}
810

    
811
#if !defined(CONFIG_SOFTMMU)
812
static void tb_invalidate_phys_page(target_ulong addr,
813
                                    unsigned long pc, void *puc)
814
{
815
    int n, current_flags, current_tb_modified;
816
    target_ulong current_pc, current_cs_base;
817
    PageDesc *p;
818
    TranslationBlock *tb, *current_tb;
819
#ifdef TARGET_HAS_PRECISE_SMC
820
    CPUState *env = cpu_single_env;
821
#endif
822

    
823
    addr &= TARGET_PAGE_MASK;
824
    p = page_find(addr >> TARGET_PAGE_BITS);
825
    if (!p)
826
        return;
827
    tb = p->first_tb;
828
    current_tb_modified = 0;
829
    current_tb = NULL;
830
    current_pc = 0; /* avoid warning */
831
    current_cs_base = 0; /* avoid warning */
832
    current_flags = 0; /* avoid warning */
833
#ifdef TARGET_HAS_PRECISE_SMC
834
    if (tb && pc != 0) {
835
        current_tb = tb_find_pc(pc);
836
    }
837
#endif
838
    while (tb != NULL) {
839
        n = (long)tb & 3;
840
        tb = (TranslationBlock *)((long)tb & ~3);
841
#ifdef TARGET_HAS_PRECISE_SMC
842
        if (current_tb == tb &&
843
            !(current_tb->cflags & CF_SINGLE_INSN)) {
844
                /* If we are modifying the current TB, we must stop
845
                   its execution. We could be more precise by checking
846
                   that the modification is after the current PC, but it
847
                   would require a specialized function to partially
848
                   restore the CPU state */
849

    
850
            current_tb_modified = 1;
851
            cpu_restore_state(current_tb, env, pc, puc);
852
#if defined(TARGET_I386)
853
            current_flags = env->hflags;
854
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
855
            current_cs_base = (target_ulong)env->segs[R_CS].base;
856
            current_pc = current_cs_base + env->eip;
857
#else
858
#error unsupported CPU
859
#endif
860
        }
861
#endif /* TARGET_HAS_PRECISE_SMC */
862
        tb_phys_invalidate(tb, addr);
863
        tb = tb->page_next[n];
864
    }
865
    p->first_tb = NULL;
866
#ifdef TARGET_HAS_PRECISE_SMC
867
    if (current_tb_modified) {
868
        /* we generate a block containing just the instruction
869
           modifying the memory. It will ensure that it cannot modify
870
           itself */
871
        env->current_tb = NULL;
872
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
873
                    CF_SINGLE_INSN);
874
        cpu_resume_from_signal(env, puc);
875
    }
876
#endif
877
}
878
#endif
879

    
880
/* add the tb in the target page and protect it if necessary */
881
static inline void tb_alloc_page(TranslationBlock *tb,
882
                                 unsigned int n, target_ulong page_addr)
883
{
884
    PageDesc *p;
885
    TranslationBlock *last_first_tb;
886

    
887
    tb->page_addr[n] = page_addr;
888
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
889
    tb->page_next[n] = p->first_tb;
890
    last_first_tb = p->first_tb;
891
    p->first_tb = (TranslationBlock *)((long)tb | n);
892
    invalidate_page_bitmap(p);
893

    
894
#if defined(TARGET_HAS_SMC) || 1
895

    
896
#if defined(CONFIG_USER_ONLY)
897
    if (p->flags & PAGE_WRITE) {
898
        target_ulong addr;
899
        PageDesc *p2;
900
        int prot;
901

    
902
        /* force the host page as non writable (writes will have a
903
           page fault + mprotect overhead) */
904
        page_addr &= qemu_host_page_mask;
905
        prot = 0;
906
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
907
            addr += TARGET_PAGE_SIZE) {
908

    
909
            p2 = page_find (addr >> TARGET_PAGE_BITS);
910
            if (!p2)
911
                continue;
912
            prot |= p2->flags;
913
            p2->flags &= ~PAGE_WRITE;
914
            page_get_flags(addr);
915
          }
916
        mprotect(g2h(page_addr), qemu_host_page_size,
917
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
918
#ifdef DEBUG_TB_INVALIDATE
919
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
920
               page_addr);
921
#endif
922
    }
923
#else
924
    /* if some code is already present, then the pages are already
925
       protected. So we handle the case where only the first TB is
926
       allocated in a physical page */
927
    if (!last_first_tb) {
928
        tlb_protect_code(page_addr);
929
    }
930
#endif
931

    
932
#endif /* TARGET_HAS_SMC */
933
}
934

    
935
/* Allocate a new translation block. Flush the translation buffer if
936
   too many translation blocks or too much generated code. */
937
TranslationBlock *tb_alloc(target_ulong pc)
938
{
939
    TranslationBlock *tb;
940

    
941
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
942
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
943
        return NULL;
944
    tb = &tbs[nb_tbs++];
945
    tb->pc = pc;
946
    tb->cflags = 0;
947
    return tb;
948
}
949

    
950
/* add a new TB and link it to the physical page tables. phys_page2 is
951
   (-1) to indicate that only one page contains the TB. */
952
void tb_link_phys(TranslationBlock *tb,
953
                  target_ulong phys_pc, target_ulong phys_page2)
954
{
955
    unsigned int h;
956
    TranslationBlock **ptb;
957

    
958
    /* add in the physical hash table */
959
    h = tb_phys_hash_func(phys_pc);
960
    ptb = &tb_phys_hash[h];
961
    tb->phys_hash_next = *ptb;
962
    *ptb = tb;
963

    
964
    /* add in the page list */
965
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
966
    if (phys_page2 != -1)
967
        tb_alloc_page(tb, 1, phys_page2);
968
    else
969
        tb->page_addr[1] = -1;
970

    
971
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
972
    tb->jmp_next[0] = NULL;
973
    tb->jmp_next[1] = NULL;
974

    
975
    /* init original jump addresses */
976
    if (tb->tb_next_offset[0] != 0xffff)
977
        tb_reset_jump(tb, 0);
978
    if (tb->tb_next_offset[1] != 0xffff)
979
        tb_reset_jump(tb, 1);
980

    
981
#ifdef DEBUG_TB_CHECK
982
    tb_page_check();
983
#endif
984
}
985

    
986
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
987
   tb[1].tc_ptr. Return NULL if not found */
988
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
989
{
990
    int m_min, m_max, m;
991
    unsigned long v;
992
    TranslationBlock *tb;
993

    
994
    if (nb_tbs <= 0)
995
        return NULL;
996
    if (tc_ptr < (unsigned long)code_gen_buffer ||
997
        tc_ptr >= (unsigned long)code_gen_ptr)
998
        return NULL;
999
    /* binary search (cf Knuth) */
1000
    m_min = 0;
1001
    m_max = nb_tbs - 1;
1002
    while (m_min <= m_max) {
1003
        m = (m_min + m_max) >> 1;
1004
        tb = &tbs[m];
1005
        v = (unsigned long)tb->tc_ptr;
1006
        if (v == tc_ptr)
1007
            return tb;
1008
        else if (tc_ptr < v) {
1009
            m_max = m - 1;
1010
        } else {
1011
            m_min = m + 1;
1012
        }
1013
    }
1014
    return &tbs[m_max];
1015
}
1016

    
1017
static void tb_reset_jump_recursive(TranslationBlock *tb);
1018

    
1019
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1020
{
1021
    TranslationBlock *tb1, *tb_next, **ptb;
1022
    unsigned int n1;
1023

    
1024
    tb1 = tb->jmp_next[n];
1025
    if (tb1 != NULL) {
1026
        /* find head of list */
1027
        for(;;) {
1028
            n1 = (long)tb1 & 3;
1029
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1030
            if (n1 == 2)
1031
                break;
1032
            tb1 = tb1->jmp_next[n1];
1033
        }
1034
        /* we are now sure now that tb jumps to tb1 */
1035
        tb_next = tb1;
1036

    
1037
        /* remove tb from the jmp_first list */
1038
        ptb = &tb_next->jmp_first;
1039
        for(;;) {
1040
            tb1 = *ptb;
1041
            n1 = (long)tb1 & 3;
1042
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1043
            if (n1 == n && tb1 == tb)
1044
                break;
1045
            ptb = &tb1->jmp_next[n1];
1046
        }
1047
        *ptb = tb->jmp_next[n];
1048
        tb->jmp_next[n] = NULL;
1049

    
1050
        /* suppress the jump to next tb in generated code */
1051
        tb_reset_jump(tb, n);
1052

    
1053
        /* suppress jumps in the tb on which we could have jumped */
1054
        tb_reset_jump_recursive(tb_next);
1055
    }
1056
}
1057

    
1058
static void tb_reset_jump_recursive(TranslationBlock *tb)
1059
{
1060
    tb_reset_jump_recursive2(tb, 0);
1061
    tb_reset_jump_recursive2(tb, 1);
1062
}
1063

    
1064
#if defined(TARGET_HAS_ICE)
1065
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1066
{
1067
    target_phys_addr_t addr;
1068
    target_ulong pd;
1069
    ram_addr_t ram_addr;
1070
    PhysPageDesc *p;
1071

    
1072
    addr = cpu_get_phys_page_debug(env, pc);
1073
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1074
    if (!p) {
1075
        pd = IO_MEM_UNASSIGNED;
1076
    } else {
1077
        pd = p->phys_offset;
1078
    }
1079
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1080
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1081
}
1082
#endif
1083

    
1084
/* Add a watchpoint.  */
1085
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1086
{
1087
    int i;
1088

    
1089
    for (i = 0; i < env->nb_watchpoints; i++) {
1090
        if (addr == env->watchpoint[i].vaddr)
1091
            return 0;
1092
    }
1093
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1094
        return -1;
1095

    
1096
    i = env->nb_watchpoints++;
1097
    env->watchpoint[i].vaddr = addr;
1098
    tlb_flush_page(env, addr);
1099
    /* FIXME: This flush is needed because of the hack to make memory ops
1100
       terminate the TB.  It can be removed once the proper IO trap and
1101
       re-execute bits are in.  */
1102
    tb_flush(env);
1103
    return i;
1104
}
1105

    
1106
/* Remove a watchpoint.  */
1107
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1108
{
1109
    int i;
1110

    
1111
    for (i = 0; i < env->nb_watchpoints; i++) {
1112
        if (addr == env->watchpoint[i].vaddr) {
1113
            env->nb_watchpoints--;
1114
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1115
            tlb_flush_page(env, addr);
1116
            return 0;
1117
        }
1118
    }
1119
    return -1;
1120
}
1121

    
1122
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1123
   breakpoint is reached */
1124
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1125
{
1126
#if defined(TARGET_HAS_ICE)
1127
    int i;
1128

    
1129
    for(i = 0; i < env->nb_breakpoints; i++) {
1130
        if (env->breakpoints[i] == pc)
1131
            return 0;
1132
    }
1133

    
1134
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1135
        return -1;
1136
    env->breakpoints[env->nb_breakpoints++] = pc;
1137

    
1138
    breakpoint_invalidate(env, pc);
1139
    return 0;
1140
#else
1141
    return -1;
1142
#endif
1143
}
1144

    
1145
/* remove a breakpoint */
1146
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1147
{
1148
#if defined(TARGET_HAS_ICE)
1149
    int i;
1150
    for(i = 0; i < env->nb_breakpoints; i++) {
1151
        if (env->breakpoints[i] == pc)
1152
            goto found;
1153
    }
1154
    return -1;
1155
 found:
1156
    env->nb_breakpoints--;
1157
    if (i < env->nb_breakpoints)
1158
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1159

    
1160
    breakpoint_invalidate(env, pc);
1161
    return 0;
1162
#else
1163
    return -1;
1164
#endif
1165
}
1166

    
1167
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1168
   CPU loop after each instruction */
1169
void cpu_single_step(CPUState *env, int enabled)
1170
{
1171
#if defined(TARGET_HAS_ICE)
1172
    if (env->singlestep_enabled != enabled) {
1173
        env->singlestep_enabled = enabled;
1174
        /* must flush all the translated code to avoid inconsistancies */
1175
        /* XXX: only flush what is necessary */
1176
        tb_flush(env);
1177
    }
1178
#endif
1179
}
1180

    
1181
/* enable or disable low levels log */
1182
void cpu_set_log(int log_flags)
1183
{
1184
    loglevel = log_flags;
1185
    if (loglevel && !logfile) {
1186
        logfile = fopen(logfilename, log_append ? "a" : "w");
1187
        if (!logfile) {
1188
            perror(logfilename);
1189
            _exit(1);
1190
        }
1191
#if !defined(CONFIG_SOFTMMU)
1192
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1193
        {
1194
            static uint8_t logfile_buf[4096];
1195
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1196
        }
1197
#else
1198
        setvbuf(logfile, NULL, _IOLBF, 0);
1199
#endif
1200
        log_append = 1;
1201
    }
1202
    if (!loglevel && logfile) {
1203
        fclose(logfile);
1204
        logfile = NULL;
1205
    }
1206
}
1207

    
1208
void cpu_set_log_filename(const char *filename)
1209
{
1210
    logfilename = strdup(filename);
1211
    if (logfile) {
1212
        fclose(logfile);
1213
        logfile = NULL;
1214
    }
1215
    cpu_set_log(loglevel);
1216
}
1217

    
1218
/* mask must never be zero, except for A20 change call */
1219
void cpu_interrupt(CPUState *env, int mask)
1220
{
1221
    TranslationBlock *tb;
1222
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1223

    
1224
    env->interrupt_request |= mask;
1225
    /* if the cpu is currently executing code, we must unlink it and
1226
       all the potentially executing TB */
1227
    tb = env->current_tb;
1228
    if (tb && !testandset(&interrupt_lock)) {
1229
        env->current_tb = NULL;
1230
        tb_reset_jump_recursive(tb);
1231
        resetlock(&interrupt_lock);
1232
    }
1233
}
1234

    
1235
void cpu_reset_interrupt(CPUState *env, int mask)
1236
{
1237
    env->interrupt_request &= ~mask;
1238
}
1239

    
1240
CPULogItem cpu_log_items[] = {
1241
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1242
      "show generated host assembly code for each compiled TB" },
1243
    { CPU_LOG_TB_IN_ASM, "in_asm",
1244
      "show target assembly code for each compiled TB" },
1245
    { CPU_LOG_TB_OP, "op",
1246
      "show micro ops for each compiled TB" },
1247
    { CPU_LOG_TB_OP_OPT, "op_opt",
1248
      "show micro ops "
1249
#ifdef TARGET_I386
1250
      "before eflags optimization and "
1251
#endif
1252
      "after liveness analysis" },
1253
    { CPU_LOG_INT, "int",
1254
      "show interrupts/exceptions in short format" },
1255
    { CPU_LOG_EXEC, "exec",
1256
      "show trace before each executed TB (lots of logs)" },
1257
    { CPU_LOG_TB_CPU, "cpu",
1258
      "show CPU state before block translation" },
1259
#ifdef TARGET_I386
1260
    { CPU_LOG_PCALL, "pcall",
1261
      "show protected mode far calls/returns/exceptions" },
1262
#endif
1263
#ifdef DEBUG_IOPORT
1264
    { CPU_LOG_IOPORT, "ioport",
1265
      "show all i/o ports accesses" },
1266
#endif
1267
    { 0, NULL, NULL },
1268
};
1269

    
1270
static int cmp1(const char *s1, int n, const char *s2)
1271
{
1272
    if (strlen(s2) != n)
1273
        return 0;
1274
    return memcmp(s1, s2, n) == 0;
1275
}
1276

    
1277
/* takes a comma separated list of log masks. Return 0 if error. */
1278
int cpu_str_to_log_mask(const char *str)
1279
{
1280
    CPULogItem *item;
1281
    int mask;
1282
    const char *p, *p1;
1283

    
1284
    p = str;
1285
    mask = 0;
1286
    for(;;) {
1287
        p1 = strchr(p, ',');
1288
        if (!p1)
1289
            p1 = p + strlen(p);
1290
        if(cmp1(p,p1-p,"all")) {
1291
                for(item = cpu_log_items; item->mask != 0; item++) {
1292
                        mask |= item->mask;
1293
                }
1294
        } else {
1295
        for(item = cpu_log_items; item->mask != 0; item++) {
1296
            if (cmp1(p, p1 - p, item->name))
1297
                goto found;
1298
        }
1299
        return 0;
1300
        }
1301
    found:
1302
        mask |= item->mask;
1303
        if (*p1 != ',')
1304
            break;
1305
        p = p1 + 1;
1306
    }
1307
    return mask;
1308
}
1309

    
1310
void cpu_abort(CPUState *env, const char *fmt, ...)
1311
{
1312
    va_list ap;
1313
    va_list ap2;
1314

    
1315
    va_start(ap, fmt);
1316
    va_copy(ap2, ap);
1317
    fprintf(stderr, "qemu: fatal: ");
1318
    vfprintf(stderr, fmt, ap);
1319
    fprintf(stderr, "\n");
1320
#ifdef TARGET_I386
1321
    if(env->intercept & INTERCEPT_SVM_MASK) {
1322
        /* most probably the virtual machine should not
1323
           be shut down but rather caught by the VMM */
1324
        vmexit(SVM_EXIT_SHUTDOWN, 0);
1325
    }
1326
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1327
#else
1328
    cpu_dump_state(env, stderr, fprintf, 0);
1329
#endif
1330
    if (logfile) {
1331
        fprintf(logfile, "qemu: fatal: ");
1332
        vfprintf(logfile, fmt, ap2);
1333
        fprintf(logfile, "\n");
1334
#ifdef TARGET_I386
1335
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1336
#else
1337
        cpu_dump_state(env, logfile, fprintf, 0);
1338
#endif
1339
        fflush(logfile);
1340
        fclose(logfile);
1341
    }
1342
    va_end(ap2);
1343
    va_end(ap);
1344
    abort();
1345
}
1346

    
1347
CPUState *cpu_copy(CPUState *env)
1348
{
1349
    CPUState *new_env = cpu_init(env->cpu_model_str);
1350
    /* preserve chaining and index */
1351
    CPUState *next_cpu = new_env->next_cpu;
1352
    int cpu_index = new_env->cpu_index;
1353
    memcpy(new_env, env, sizeof(CPUState));
1354
    new_env->next_cpu = next_cpu;
1355
    new_env->cpu_index = cpu_index;
1356
    return new_env;
1357
}
1358

    
1359
#if !defined(CONFIG_USER_ONLY)
1360

    
1361
/* NOTE: if flush_global is true, also flush global entries (not
1362
   implemented yet) */
1363
void tlb_flush(CPUState *env, int flush_global)
1364
{
1365
    int i;
1366

    
1367
#if defined(DEBUG_TLB)
1368
    printf("tlb_flush:\n");
1369
#endif
1370
    /* must reset current TB so that interrupts cannot modify the
1371
       links while we are modifying them */
1372
    env->current_tb = NULL;
1373

    
1374
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1375
        env->tlb_table[0][i].addr_read = -1;
1376
        env->tlb_table[0][i].addr_write = -1;
1377
        env->tlb_table[0][i].addr_code = -1;
1378
        env->tlb_table[1][i].addr_read = -1;
1379
        env->tlb_table[1][i].addr_write = -1;
1380
        env->tlb_table[1][i].addr_code = -1;
1381
#if (NB_MMU_MODES >= 3)
1382
        env->tlb_table[2][i].addr_read = -1;
1383
        env->tlb_table[2][i].addr_write = -1;
1384
        env->tlb_table[2][i].addr_code = -1;
1385
#if (NB_MMU_MODES == 4)
1386
        env->tlb_table[3][i].addr_read = -1;
1387
        env->tlb_table[3][i].addr_write = -1;
1388
        env->tlb_table[3][i].addr_code = -1;
1389
#endif
1390
#endif
1391
    }
1392

    
1393
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1394

    
1395
#if !defined(CONFIG_SOFTMMU)
1396
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1397
#endif
1398
#ifdef USE_KQEMU
1399
    if (env->kqemu_enabled) {
1400
        kqemu_flush(env, flush_global);
1401
    }
1402
#endif
1403
    tlb_flush_count++;
1404
}
1405

    
1406
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1407
{
1408
    if (addr == (tlb_entry->addr_read &
1409
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1410
        addr == (tlb_entry->addr_write &
1411
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1412
        addr == (tlb_entry->addr_code &
1413
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1414
        tlb_entry->addr_read = -1;
1415
        tlb_entry->addr_write = -1;
1416
        tlb_entry->addr_code = -1;
1417
    }
1418
}
1419

    
1420
void tlb_flush_page(CPUState *env, target_ulong addr)
1421
{
1422
    int i;
1423
    TranslationBlock *tb;
1424

    
1425
#if defined(DEBUG_TLB)
1426
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1427
#endif
1428
    /* must reset current TB so that interrupts cannot modify the
1429
       links while we are modifying them */
1430
    env->current_tb = NULL;
1431

    
1432
    addr &= TARGET_PAGE_MASK;
1433
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1434
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1435
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1436
#if (NB_MMU_MODES >= 3)
1437
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1438
#if (NB_MMU_MODES == 4)
1439
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1440
#endif
1441
#endif
1442

    
1443
    /* Discard jump cache entries for any tb which might potentially
1444
       overlap the flushed page.  */
1445
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1446
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1447

    
1448
    i = tb_jmp_cache_hash_page(addr);
1449
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1450

    
1451
#if !defined(CONFIG_SOFTMMU)
1452
    if (addr < MMAP_AREA_END)
1453
        munmap((void *)addr, TARGET_PAGE_SIZE);
1454
#endif
1455
#ifdef USE_KQEMU
1456
    if (env->kqemu_enabled) {
1457
        kqemu_flush_page(env, addr);
1458
    }
1459
#endif
1460
}
1461

    
1462
/* update the TLBs so that writes to code in the virtual page 'addr'
1463
   can be detected */
1464
static void tlb_protect_code(ram_addr_t ram_addr)
1465
{
1466
    cpu_physical_memory_reset_dirty(ram_addr,
1467
                                    ram_addr + TARGET_PAGE_SIZE,
1468
                                    CODE_DIRTY_FLAG);
1469
}
1470

    
1471
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1472
   tested for self modifying code */
1473
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1474
                                    target_ulong vaddr)
1475
{
1476
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1477
}
1478

    
1479
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1480
                                         unsigned long start, unsigned long length)
1481
{
1482
    unsigned long addr;
1483
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1484
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1485
        if ((addr - start) < length) {
1486
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1487
        }
1488
    }
1489
}
1490

    
1491
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1492
                                     int dirty_flags)
1493
{
1494
    CPUState *env;
1495
    unsigned long length, start1;
1496
    int i, mask, len;
1497
    uint8_t *p;
1498

    
1499
    start &= TARGET_PAGE_MASK;
1500
    end = TARGET_PAGE_ALIGN(end);
1501

    
1502
    length = end - start;
1503
    if (length == 0)
1504
        return;
1505
    len = length >> TARGET_PAGE_BITS;
1506
#ifdef USE_KQEMU
1507
    /* XXX: should not depend on cpu context */
1508
    env = first_cpu;
1509
    if (env->kqemu_enabled) {
1510
        ram_addr_t addr;
1511
        addr = start;
1512
        for(i = 0; i < len; i++) {
1513
            kqemu_set_notdirty(env, addr);
1514
            addr += TARGET_PAGE_SIZE;
1515
        }
1516
    }
1517
#endif
1518
    mask = ~dirty_flags;
1519
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1520
    for(i = 0; i < len; i++)
1521
        p[i] &= mask;
1522

    
1523
    /* we modify the TLB cache so that the dirty bit will be set again
1524
       when accessing the range */
1525
    start1 = start + (unsigned long)phys_ram_base;
1526
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1527
        for(i = 0; i < CPU_TLB_SIZE; i++)
1528
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1529
        for(i = 0; i < CPU_TLB_SIZE; i++)
1530
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1531
#if (NB_MMU_MODES >= 3)
1532
        for(i = 0; i < CPU_TLB_SIZE; i++)
1533
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1534
#if (NB_MMU_MODES == 4)
1535
        for(i = 0; i < CPU_TLB_SIZE; i++)
1536
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1537
#endif
1538
#endif
1539
    }
1540

    
1541
#if !defined(CONFIG_SOFTMMU)
1542
    /* XXX: this is expensive */
1543
    {
1544
        VirtPageDesc *p;
1545
        int j;
1546
        target_ulong addr;
1547

    
1548
        for(i = 0; i < L1_SIZE; i++) {
1549
            p = l1_virt_map[i];
1550
            if (p) {
1551
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1552
                for(j = 0; j < L2_SIZE; j++) {
1553
                    if (p->valid_tag == virt_valid_tag &&
1554
                        p->phys_addr >= start && p->phys_addr < end &&
1555
                        (p->prot & PROT_WRITE)) {
1556
                        if (addr < MMAP_AREA_END) {
1557
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
1558
                                     p->prot & ~PROT_WRITE);
1559
                        }
1560
                    }
1561
                    addr += TARGET_PAGE_SIZE;
1562
                    p++;
1563
                }
1564
            }
1565
        }
1566
    }
1567
#endif
1568
}
1569

    
1570
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1571
{
1572
    ram_addr_t ram_addr;
1573

    
1574
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1575
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1576
            tlb_entry->addend - (unsigned long)phys_ram_base;
1577
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1578
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1579
        }
1580
    }
1581
}
1582

    
1583
/* update the TLB according to the current state of the dirty bits */
1584
void cpu_tlb_update_dirty(CPUState *env)
1585
{
1586
    int i;
1587
    for(i = 0; i < CPU_TLB_SIZE; i++)
1588
        tlb_update_dirty(&env->tlb_table[0][i]);
1589
    for(i = 0; i < CPU_TLB_SIZE; i++)
1590
        tlb_update_dirty(&env->tlb_table[1][i]);
1591
#if (NB_MMU_MODES >= 3)
1592
    for(i = 0; i < CPU_TLB_SIZE; i++)
1593
        tlb_update_dirty(&env->tlb_table[2][i]);
1594
#if (NB_MMU_MODES == 4)
1595
    for(i = 0; i < CPU_TLB_SIZE; i++)
1596
        tlb_update_dirty(&env->tlb_table[3][i]);
1597
#endif
1598
#endif
1599
}
1600

    
1601
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1602
                                  unsigned long start)
1603
{
1604
    unsigned long addr;
1605
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1606
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1607
        if (addr == start) {
1608
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1609
        }
1610
    }
1611
}
1612

    
1613
/* update the TLB corresponding to virtual page vaddr and phys addr
1614
   addr so that it is no longer dirty */
1615
static inline void tlb_set_dirty(CPUState *env,
1616
                                 unsigned long addr, target_ulong vaddr)
1617
{
1618
    int i;
1619

    
1620
    addr &= TARGET_PAGE_MASK;
1621
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1622
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1623
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1624
#if (NB_MMU_MODES >= 3)
1625
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1626
#if (NB_MMU_MODES == 4)
1627
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1628
#endif
1629
#endif
1630
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
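
/* Note: the low bits of addr_read/addr_write/addr_code double as an io_mem
   index.  IO_MEM_RAM is 0, so an ordinary RAM page keeps a clean page-aligned
   address, while ROM, NOTDIRTY or device pages carry non-zero low bits that
   route the access through the slow I/O path in the softmmu templates. */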

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
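
/* Illustrative use (user-mode emulation): a caller validates a guest buffer
   before touching it, for example
       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -1;   (report an EFAULT-style error)
   where guest_addr and size are hypothetical; this also unprotects pages that
   were made read-only only because they contain translated code. */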

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
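
/* Worked example (assuming 4 KiB target pages): registering orig_size = 0x80
   bytes at start_addr = 0x10000100 and looking at the containing page
   addr = 0x10000000 yields start_addr2 = 0x100 and end_addr2 = 0x17f; both
   set need_subpage, so only that slice of the page goes through a subpage. */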

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
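
/* Illustrative only: a board model typically maps RAM with the offset
   returned by qemu_ram_alloc() and a device with the index returned by
   cpu_register_io_memory(), e.g.
       cpu_register_physical_memory(0x00000000, ram_size, ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0x10000000, 0x1000, dev_io_index);
   where ram_size, ram_offset and dev_io_index are hypothetical values. */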

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
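
/* Note: this is a simple bump allocator over the single phys_ram_base block;
   qemu_ram_free() below is intentionally a no-op. */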

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};
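
/* The notdirty handlers are installed for RAM pages whose dirty bits are
   clear (see tlb_set_page_exec): the first write to such a page lands here,
   invalidates any translated code on it, sets the dirty flags and then lets
   tlb_set_dirty() restore direct RAM access for later writes. */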

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
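
/* Subpage support lets a single target page be shared by several memory
   regions, e.g. a small device register block that does not fill a whole
   page: a subpage_t records, per SUBPAGE_IDX slot and access size, which
   io_mem handler and opaque pointer the access should be forwarded to. */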

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
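
/* io_mem_init pre-registers the fixed ROM, unassigned and notdirty slots and
   starts dynamic allocation above them (io_mem_nb = 5); it also allocates the
   dirty bitmap covering all of guest RAM. */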

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is positive, the corresponding io zone is
   modified. If it is zero or negative, a new io zone is allocated. The
   return value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
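
/* Illustrative only: a device model passes byte/word/long handler arrays and
   an opaque state pointer, e.g.
       static CPUReadMemoryFunc *mydev_read[3] = { mydev_readb, mydev_readw, mydev_readl };
       static CPUWriteMemoryFunc *mydev_write[3] = { mydev_writeb, mydev_writew, mydev_writel };
       io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
   where the mydev_* handlers and s are hypothetical; the returned value is
   then passed to cpu_register_physical_memory(). */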

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
#ifdef CONFIG_PROFILER
    {
        int64_t tot;
        tot = dyngen_interm_time + dyngen_code_time;
        cpu_fprintf(f, "JIT cycles          %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                    tot, tot / 2.4e9);
        cpu_fprintf(f, "translated TBs      %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                    dyngen_tb_count,
                    dyngen_tb_count1 - dyngen_tb_count,
                    dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
        cpu_fprintf(f, "avg ops/TB          %0.1f max=%d\n",
                    dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
        cpu_fprintf(f, "old ops/total ops   %0.1f%%\n",
                    dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
        cpu_fprintf(f, "deleted ops/TB      %0.2f\n",
                    dyngen_tb_count ?
                    (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
        cpu_fprintf(f, "cycles/op           %0.1f\n",
                    dyngen_op_count ? (double)tot / dyngen_op_count : 0);
        cpu_fprintf(f, "cycles/in byte      %0.1f\n",
                    dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
        cpu_fprintf(f, "cycles/out byte     %0.1f\n",
                    dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
        if (tot == 0)
            tot = 1;
        cpu_fprintf(f, "  gen_interm time   %0.1f%%\n",
                    (double)dyngen_interm_time / tot * 100.0);
        cpu_fprintf(f, "  gen_code time     %0.1f%%\n",
                    (double)dyngen_code_time / tot * 100.0);
        cpu_fprintf(f, "cpu_restore count   %" PRId64 "\n",
                    dyngen_restore_count);
        cpu_fprintf(f, "  avg cycles        %0.1f\n",
                    dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
        {
            extern void dump_op_count(void);
            dump_op_count();
        }
    }
#endif
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif