/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64-bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

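/* Editor's note (added, not in the original source): with the typical
   TARGET_PAGE_BITS = 12 and L2_BITS = 10, a 32-bit target address splits
   into a 10-bit L1 index, a 10-bit L2 index and a 12-bit page offset.
   The 'index' passed to page_find()/page_find_alloc() below is already a
   page number, so the lookup is l1_map[index >> L2_BITS] followed by
   p + (index & (L2_SIZE - 1)). */
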
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

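/* Editor's note (added): l1_phys_map is indexed by physical page number
   (physical address >> TARGET_PAGE_BITS).  When TARGET_PHYS_ADDR_SPACE_BITS
   exceeds 32, the #if block below inserts an extra top level, turning the
   walk into three levels; leaf entries start out as IO_MEM_UNASSIGNED
   until memory is registered. */
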
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

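/* Editor's note (added): in the per-page TB lists and jump lists handled
   below, the low two bits of each TranslationBlock pointer are a tag:
   0 or 1 names which of the TB's (at most) two physical pages the link
   belongs to, and 2 marks the end of the circular list.  This is why the
   code keeps unpacking pointers with "(long)tb & 3" and "(long)tb & ~3". */
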
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

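/* Editor's note (added): tb_phys_invalidate() below unlinks a TB from
   everything that can reach it: the physical hash table, the per-page TB
   lists, each CPU's tb_jmp_cache and both jump lists; TBs that chained to
   it get their jumps reset so they fall back to the exit path.  The
   generated host code itself is only reclaimed by the next tb_flush(). */
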
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

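/* Editor's note (added): set_bits() marks bits [start, start + len) in a
   byte-addressed bitmap.  For example, set_bits(tab, 10, 7) sets bits
   10..16, i.e. the top six bits of tab[1] and the lowest bit of tab[2];
   the masks below handle the partial first and last bytes, with 0xff
   fills in between.  It is used to record which bytes of a page contain
   translated code. */
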
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

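/* Editor's note (added): with precise SMC support, tb_gen_code() is used
   to retranslate only the store instruction that modified its own TB
   (cflags = CF_SINGLE_INSN), so execution can resume without the new
   block being able to invalidate itself again. */
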
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in the range [start, end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do this to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

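/* Editor's note (added): tb_find_pc() relies on tbs[] being ordered by
   tc_ptr, which holds because code is generated sequentially into
   code_gen_buffer; when the binary search misses, tbs[m_max] is the last
   block starting at or before tc_ptr, i.e. the one containing it. */
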
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

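/* Editor's note (added): the two helpers below are used by cpu_interrupt()
   to unlink the whole chain of TBs reachable from the currently executing
   one, so the CPU loop regains control at the next block boundary rather
   than looping inside chained generated code. */
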
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

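/* Usage example (editor's note, added): cpu_str_to_log_mask("in_asm,op")
   returns CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP; the special name "all"
   selects every entry of cpu_log_items, and 0 is returned for any
   unknown name. */
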
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

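/* Editor's note (added): a value of -1 can never match a TLB tag compare,
   because lookups mask with TARGET_PAGE_MASK | TLB_INVALID_MASK and valid
   tags are page aligned; storing -1 into addr_read/addr_write/addr_code,
   as done throughout below, is therefore how entries are invalidated. */
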
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

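/* Editor's note (added): phys_ram_dirty keeps one byte of dirty flag bits
   per target page.  cpu_physical_memory_reset_dirty() clears the given
   flags (e.g. CODE_DIRTY_FLAG) over a range and retags matching TLB write
   entries as IO_MEM_NOTDIRTY, forcing the next store through the slow
   path where the flags can be set again. */
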
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

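/* Editor's note (added): for RAM pages the entry's 'addend' is set up so
   that host_address = guest_vaddr + addend, while for I/O pages the
   io_index is kept in the low bits of the tagged address (address =
   vaddr | pd); the fast path detects the I/O case by testing those bits
   with ~TARGET_PAGE_MASK. */
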
/* add a new TLB entry. At most one entry for a given virtual address
1567
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
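
/* Illustrative sketch (added commentary, not from the original source):
   the 'addend' stored above is chosen so that the softmmu fast path can
   turn a guest virtual address into a host pointer with one addition.
   A load hit in the generated fast path proceeds roughly as:

       index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
       te = &env->tlb_table[is_user][index];
       if (te->addr_read == (vaddr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))))
           val = ldl_raw((uint8_t *)(long)(vaddr + te->addend));

   which is why tlb_set_page_exec() computes 'addend -= vaddr' before
   storing it.  DATA_SIZE here stands for the access size used by the
   softmmu templates; the sketch omits the slow-path call on a miss. */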

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
1800

    
1801
int page_get_flags(target_ulong address)
1802
{
1803
    PageDesc *p;
1804

    
1805
    p = page_find(address >> TARGET_PAGE_BITS);
1806
    if (!p)
1807
        return 0;
1808
    return p->flags;
1809
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
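
/* Usage sketch (hypothetical caller, added commentary): the usermode
   emulation of a target mprotect() would translate the target protection
   bits and then call something along the lines of:

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   Making a previously non-writable page writable invalidates any TBs
   translated from it (note the p->first_tb check above), so stale
   translated code cannot survive a subsequent guest write. */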

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
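
/* Granularity note (added commentary): mprotect() works at host page
   granularity while flags are tracked per target page, and the two may
   differ.  For instance, with 4 KB target pages on a host using 64 KB
   pages, one host page covers 16 PageDesc entries; the loop above ORs
   all of their flags together, so the host page is made writable again
   only if some covered target page carries PAGE_WRITE_ORG. */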

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
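
/* Encoding note (added commentary): a page's phys_offset does double
   duty.  For RAM, the low bits are zero and the value is an offset into
   phys_ram_base; for I/O, the low bits carry the value returned by
   cpu_register_io_memory().  A board model might therefore register,
   hypothetically:

       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(0x80000000, 0x1000, my_io_index);

   where 'ram_offset' would come from qemu_ram_alloc() and 'my_io_index'
   from cpu_register_io_memory(); both names are illustrative only. */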

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  0x%08x\n", (int)addr);
#endif
#ifdef TARGET_SPARC
    // Not enabled yet because of bugs in gdbstub etc.
    //raise_exception(TT_DATA_ACCESS);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
#ifdef TARGET_SPARC
    // Not enabled yet because of bugs in gdbstub etc.
    //raise_exception(TT_DATA_ACCESS);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
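
/* Dirty-byte sketch (added commentary): each RAM page has one byte in
   phys_ram_dirty that acts as a small set of flags.  0xff means "fully
   dirty": stores go straight to RAM through the normal TLB entry.  When
   CODE_DIRTY_FLAG is clear, translated code may depend on the page, so
   tlb_set_page_exec() redirects the page's write entries to
   IO_MEM_NOTDIRTY and every store funnels through the handlers above,
   which invalidate overlapping TBs before completing the write.  Once
   the byte is back to 0xff, tlb_set_dirty() restores the fast path. */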

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
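
/* Usage sketch (hypothetical device, added commentary): a memory-mapped
   device registers its handlers, then maps them at a physical address,
   along the lines of:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   Passing io_index <= 0 asks for a fresh slot; the opaque pointer 's'
   is handed back to every callback.  All 'mydev_*' names are
   illustrative only. */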

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy only the page-sized chunk 'l' so that each page's
               protection flags are honoured */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
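
/* Worked example (added commentary): accesses are split so that no
   chunk crosses a target page.  Writing 6 bytes starting 2 bytes before
   a page boundary is performed as one 2-byte chunk on the first page
   and one 4-byte chunk on the next, each chunk looked up independently
   in the physical page table, so a single buffer may span RAM and I/O
   regions.  Within an I/O chunk, the widest naturally aligned access
   (4, then 2, then 1 byte) is used. */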

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
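
/* Contrast note (added commentary): unlike stl_phys_notdirty() above,
   this variant participates in dirty tracking: after a RAM store it
   invalidates any translated code on the page and sets the dirty bits
   (except CODE_DIRTY_FLAG), mirroring the RAM write slow path in
   cpu_physical_memory_rw(). */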

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
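
/* Usage note (added commentary): this is the debug-access entry point
   used, for example, by the gdb stub to read and write guest memory
   through the target MMU; cpu_get_phys_page_debug() is expected to walk
   the target page tables without touching the TLB or raising faults. */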

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}
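
/* Note (added commentary): these statistics are typically surfaced
   through the monitor (e.g. the 'info jit' command), giving a quick
   view of translation-cache pressure: average TB size, host/target
   expansion ratio, cross-page TBs and chained (direct jump) TBs. */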

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
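
/* Template note (added commentary): softmmu_template.h is included four
   times with SHIFT = 0..3 to generate the 1-, 2-, 4- and 8-byte
   accessors.  With SOFTMMU_CODE_ACCESS and MMUSUFFIX _cmmu defined,
   these expansions produce the code-fetch variants (e.g. an
   ldl_code-style loader) used while translating guest instructions,
   with GETPC() stubbed to NULL since there is no generated-code caller
   to unwind. */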