/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* to optimize self-modifying code handling, we count the number of
       write lookups to a given page; past a threshold a code bitmap is
       built for it */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64-bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
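
/* A page table lookup is two-level: the high bits of a page index select
   an entry in l1_map/l1_phys_map and the low L2_BITS bits select a
   descriptor in the second-level array of L2_SIZE entries.  Worked
   example, assuming a 4 KB target page (TARGET_PAGE_BITS == 12) in the
   32-bit case above: L1_BITS = 32 - 10 - 12 = 10, so page index 0x12345
   uses L1 entry 0x12345 >> 10 = 0x48 and L2 entry 0x12345 & 0x3ff = 0x345. */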

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

158
static void page_init(void)
159
{
160
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
162
#ifdef _WIN32
163
    {
164
        SYSTEM_INFO system_info;
165
        DWORD old_protect;
166
        
167
        GetSystemInfo(&system_info);
168
        qemu_real_host_page_size = system_info.dwPageSize;
169
        
170
        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
171
                       PAGE_EXECUTE_READWRITE, &old_protect);
172
    }
173
#else
174
    qemu_real_host_page_size = getpagesize();
175
    {
176
        unsigned long start, end;
177

    
178
        start = (unsigned long)code_gen_buffer;
179
        start &= ~(qemu_real_host_page_size - 1);
180
        
181
        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
182
        end += qemu_real_host_page_size - 1;
183
        end &= ~(qemu_real_host_page_size - 1);
184
        
185
        mprotect((void *)start, end - start, 
186
                 PROT_READ | PROT_WRITE | PROT_EXEC);
187
    }
188
#endif
189

    
190
    if (qemu_host_page_size == 0)
191
        qemu_host_page_size = qemu_real_host_page_size;
192
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
193
        qemu_host_page_size = TARGET_PAGE_SIZE;
194
    qemu_host_page_bits = 0;
195
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
196
        qemu_host_page_bits++;
197
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
198
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
199
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
200
}
201

    
202
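/* return the PageDesc for the given target page index, allocating (and
   zero-initializing) the second-level table on first use */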
static inline PageDesc *page_find_alloc(unsigned int index)
203
{
204
    PageDesc **lp, *p;
205

    
206
    lp = &l1_map[index >> L2_BITS];
207
    p = *lp;
208
    if (!p) {
209
        /* allocate if not found */
210
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
211
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
212
        *lp = p;
213
    }
214
    return p + (index & (L2_SIZE - 1));
215
}
216

    
217
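/* same as page_find_alloc(), but never allocates: returns 0 (NULL) if the
   second-level table for this index does not exist yet */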
static inline PageDesc *page_find(unsigned int index)
218
{
219
    PageDesc *p;
220

    
221
    p = l1_map[index >> L2_BITS];
222
    if (!p)
223
        return 0;
224
    return p + (index & (L2_SIZE - 1));
225
}
226

    
227
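/* look up the PhysPageDesc for a physical page index; if 'alloc' is set,
   missing intermediate and leaf tables are allocated and new leaf entries
   are initialized to IO_MEM_UNASSIGNED, otherwise NULL is returned */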
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
228
{
229
    void **lp, **p;
230
    PhysPageDesc *pd;
231

    
232
    p = (void **)l1_phys_map;
233
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
234

    
235
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
236
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
237
#endif
238
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
239
    p = *lp;
240
    if (!p) {
241
        /* allocate if not found */
242
        if (!alloc)
243
            return NULL;
244
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
245
        memset(p, 0, sizeof(void *) * L1_SIZE);
246
        *lp = p;
247
    }
248
#endif
249
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
250
    pd = *lp;
251
    if (!pd) {
252
        int i;
253
        /* allocate if not found */
254
        if (!alloc)
255
            return NULL;
256
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
257
        *lp = pd;
258
        for (i = 0; i < L2_SIZE; i++)
259
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
260
    }
261
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
262
}
263

    
264
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
265
{
266
    return phys_page_find_alloc(index, 0);
267
}
268

    
269
#if !defined(CONFIG_USER_ONLY)
270
static void tlb_protect_code(ram_addr_t ram_addr);
271
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
272
                                    target_ulong vaddr);
273
#endif
274

    
275
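/* register a new (virtual) CPU: performs one-time init of the code
   generation buffer, page tables and I/O memory on the first call, then
   appends 'env' to the global CPU list and assigns it the next cpu_index */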
void cpu_exec_init(CPUState *env)
276
{
277
    CPUState **penv;
278
    int cpu_index;
279

    
280
    if (!code_gen_ptr) {
281
        code_gen_ptr = code_gen_buffer;
282
        page_init();
283
        io_mem_init();
284
    }
285
    env->next_cpu = NULL;
286
    penv = &first_cpu;
287
    cpu_index = 0;
288
    while (*penv != NULL) {
289
        penv = (CPUState **)&(*penv)->next_cpu;
290
        cpu_index++;
291
    }
292
    env->cpu_index = cpu_index;
293
    env->nb_watchpoints = 0;
294
    *penv = env;
295
}
296

    
297
static inline void invalidate_page_bitmap(PageDesc *p)
298
{
299
    if (p->code_bitmap) {
300
        qemu_free(p->code_bitmap);
301
        p->code_bitmap = NULL;
302
    }
303
    p->code_write_count = 0;
304
}
305

    
306
/* set to NULL all the 'first_tb' fields in all PageDescs */
307
static void page_flush_tb(void)
308
{
309
    int i, j;
310
    PageDesc *p;
311

    
312
    for(i = 0; i < L1_SIZE; i++) {
313
        p = l1_map[i];
314
        if (p) {
315
            for(j = 0; j < L2_SIZE; j++) {
316
                p->first_tb = NULL;
317
                invalidate_page_bitmap(p);
318
                p++;
319
            }
320
        }
321
    }
322
}
323

    
324
/* flush all the translation blocks */
325
/* XXX: tb_flush is currently not thread safe */
326
void tb_flush(CPUState *env1)
327
{
328
    CPUState *env;
329
#if defined(DEBUG_FLUSH)
330
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n", 
331
           code_gen_ptr - code_gen_buffer, 
332
           nb_tbs, 
333
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
334
#endif
335
    nb_tbs = 0;
336
    
337
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
338
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
339
    }
340

    
341
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
342
    page_flush_tb();
343

    
344
    code_gen_ptr = code_gen_buffer;
345
    /* XXX: flush processor icache at this point if cache flush is
346
       expensive */
347
    tb_flush_count++;
348
}
349

    
350
#ifdef DEBUG_TB_CHECK
351

    
352
static void tb_invalidate_check(target_ulong address)
353
{
354
    TranslationBlock *tb;
355
    int i;
356
    address &= TARGET_PAGE_MASK;
357
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
358
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
359
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
360
                  address >= tb->pc + tb->size)) {
361
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
362
                       address, (long)tb->pc, tb->size);
363
            }
364
        }
365
    }
366
}
367

    
368
/* verify that all the pages have correct rights for code */
369
static void tb_page_check(void)
370
{
371
    TranslationBlock *tb;
372
    int i, flags1, flags2;
373
    
374
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
375
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
376
            flags1 = page_get_flags(tb->pc);
377
            flags2 = page_get_flags(tb->pc + tb->size - 1);
378
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
379
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
380
                       (long)tb->pc, tb->size, flags1, flags2);
381
            }
382
        }
383
    }
384
}
385

    
386
void tb_jmp_check(TranslationBlock *tb)
387
{
388
    TranslationBlock *tb1;
389
    unsigned int n1;
390

    
391
    /* suppress any remaining jumps to this TB */
392
    tb1 = tb->jmp_first;
393
    for(;;) {
394
        n1 = (long)tb1 & 3;
395
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
396
        if (n1 == 2)
397
            break;
398
        tb1 = tb1->jmp_next[n1];
399
    }
400
    /* check end of list */
401
    if (tb1 != tb) {
402
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
403
    }
404
}
405

    
406
#endif
407

    
408
/* invalidate one TB */
409
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
410
                             int next_offset)
411
{
412
    TranslationBlock *tb1;
413
    for(;;) {
414
        tb1 = *ptb;
415
        if (tb1 == tb) {
416
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
417
            break;
418
        }
419
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
420
    }
421
}
422

    
423
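/* TB list pointers carry a tag in their two low bits: in the per-page TB
   lists the tag selects which page_next[] slot (0 or 1) of the pointed-to
   TB continues the list, and in the jump lists the value 2 marks the end
   of the circular list (the tagged pointer refers back to the owning TB,
   see tb_link_phys()).  This is why pointers are masked with ~3 before
   being dereferenced. */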
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
424
{
425
    TranslationBlock *tb1;
426
    unsigned int n1;
427

    
428
    for(;;) {
429
        tb1 = *ptb;
430
        n1 = (long)tb1 & 3;
431
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
432
        if (tb1 == tb) {
433
            *ptb = tb1->page_next[n1];
434
            break;
435
        }
436
        ptb = &tb1->page_next[n1];
437
    }
438
}
439

    
440
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
441
{
442
    TranslationBlock *tb1, **ptb;
443
    unsigned int n1;
444

    
445
    ptb = &tb->jmp_next[n];
446
    tb1 = *ptb;
447
    if (tb1) {
448
        /* find tb(n) in circular list */
449
        for(;;) {
450
            tb1 = *ptb;
451
            n1 = (long)tb1 & 3;
452
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
453
            if (n1 == n && tb1 == tb)
454
                break;
455
            if (n1 == 2) {
456
                ptb = &tb1->jmp_first;
457
            } else {
458
                ptb = &tb1->jmp_next[n1];
459
            }
460
        }
461
        /* now we can suppress tb(n) from the list */
462
        *ptb = tb->jmp_next[n];
463

    
464
        tb->jmp_next[n] = NULL;
465
    }
466
}
467

    
468
/* reset the jump entry 'n' of a TB so that it is not chained to
469
   another TB */
470
static inline void tb_reset_jump(TranslationBlock *tb, int n)
471
{
472
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
473
}
474

    
475
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
476
{
477
    CPUState *env;
478
    PageDesc *p;
479
    unsigned int h, n1;
480
    target_ulong phys_pc;
481
    TranslationBlock *tb1, *tb2;
482
    
483
    /* remove the TB from the hash list */
484
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
485
    h = tb_phys_hash_func(phys_pc);
486
    tb_remove(&tb_phys_hash[h], tb, 
487
              offsetof(TranslationBlock, phys_hash_next));
488

    
489
    /* remove the TB from the page list */
490
    if (tb->page_addr[0] != page_addr) {
491
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
492
        tb_page_remove(&p->first_tb, tb);
493
        invalidate_page_bitmap(p);
494
    }
495
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
496
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
497
        tb_page_remove(&p->first_tb, tb);
498
        invalidate_page_bitmap(p);
499
    }
500

    
501
    tb_invalidated_flag = 1;
502

    
503
    /* remove the TB from the hash list */
504
    h = tb_jmp_cache_hash_func(tb->pc);
505
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
506
        if (env->tb_jmp_cache[h] == tb)
507
            env->tb_jmp_cache[h] = NULL;
508
    }
509

    
510
    /* suppress this TB from the two jump lists */
511
    tb_jmp_remove(tb, 0);
512
    tb_jmp_remove(tb, 1);
513

    
514
    /* suppress any remaining jumps to this TB */
515
    tb1 = tb->jmp_first;
516
    for(;;) {
517
        n1 = (long)tb1 & 3;
518
        if (n1 == 2)
519
            break;
520
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
521
        tb2 = tb1->jmp_next[n1];
522
        tb_reset_jump(tb1, n1);
523
        tb1->jmp_next[n1] = NULL;
524
        tb1 = tb2;
525
    }
526
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
527

    
528
    tb_phys_invalidate_count++;
529
}
530

    
531
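/* set 'len' consecutive bits starting at bit 'start' in the bitmap 'tab'.
   Example: set_bits(tab, 3, 7) sets bits 3..9, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x03. */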
static inline void set_bits(uint8_t *tab, int start, int len)
532
{
533
    int end, mask, end1;
534

    
535
    end = start + len;
536
    tab += start >> 3;
537
    mask = 0xff << (start & 7);
538
    if ((start & ~7) == (end & ~7)) {
539
        if (start < end) {
540
            mask &= ~(0xff << (end & 7));
541
            *tab |= mask;
542
        }
543
    } else {
544
        *tab++ |= mask;
545
        start = (start + 8) & ~7;
546
        end1 = end & ~7;
547
        while (start < end1) {
548
            *tab++ = 0xff;
549
            start += 8;
550
        }
551
        if (start < end) {
552
            mask = ~(0xff << (end & 7));
553
            *tab |= mask;
554
        }
555
    }
556
}
557

    
558
static void build_page_bitmap(PageDesc *p)
559
{
560
    int n, tb_start, tb_end;
561
    TranslationBlock *tb;
562
    
563
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
564
    if (!p->code_bitmap)
565
        return;
566
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
567

    
568
    tb = p->first_tb;
569
    while (tb != NULL) {
570
        n = (long)tb & 3;
571
        tb = (TranslationBlock *)((long)tb & ~3);
572
        /* NOTE: this is subtle as a TB may span two physical pages */
573
        if (n == 0) {
574
            /* NOTE: tb_end may be after the end of the page, but
575
               it is not a problem */
576
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
577
            tb_end = tb_start + tb->size;
578
            if (tb_end > TARGET_PAGE_SIZE)
579
                tb_end = TARGET_PAGE_SIZE;
580
        } else {
581
            tb_start = 0;
582
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
583
        }
584
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
585
        tb = tb->page_next[n];
586
    }
587
}
588

    
589
#ifdef TARGET_HAS_PRECISE_SMC
590

    
591
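/* generate a new TB for the code starting at 'pc' and link it into the
   physical page tables; if no TB slot or code buffer space is left, the
   whole TB cache is flushed first */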
static void tb_gen_code(CPUState *env, 
592
                        target_ulong pc, target_ulong cs_base, int flags,
593
                        int cflags)
594
{
595
    TranslationBlock *tb;
596
    uint8_t *tc_ptr;
597
    target_ulong phys_pc, phys_page2, virt_page2;
598
    int code_gen_size;
599

    
600
    phys_pc = get_phys_addr_code(env, pc);
601
    tb = tb_alloc(pc);
602
    if (!tb) {
603
        /* flush must be done */
604
        tb_flush(env);
605
        /* cannot fail at this point */
606
        tb = tb_alloc(pc);
607
    }
608
    tc_ptr = code_gen_ptr;
609
    tb->tc_ptr = tc_ptr;
610
    tb->cs_base = cs_base;
611
    tb->flags = flags;
612
    tb->cflags = cflags;
613
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
614
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
615
    
616
    /* check next page if needed */
617
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
618
    phys_page2 = -1;
619
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
620
        phys_page2 = get_phys_addr_code(env, virt_page2);
621
    }
622
    tb_link_phys(tb, phys_pc, phys_page2);
623
}
624
#endif
625
    
626
/* invalidate all TBs which intersect with the target physical address
   range [start;end[. NOTE: start and end must refer to the same physical
   page. 'is_cpu_write_access' should be true if called from a real cpu
   write access: the virtual CPU will exit the current TB if code is
   modified inside this TB. */
631
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end, 
632
                                   int is_cpu_write_access)
633
{
634
    int n, current_tb_modified, current_tb_not_found, current_flags;
635
    CPUState *env = cpu_single_env;
636
    PageDesc *p;
637
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
638
    target_ulong tb_start, tb_end;
639
    target_ulong current_pc, current_cs_base;
640

    
641
    p = page_find(start >> TARGET_PAGE_BITS);
642
    if (!p) 
643
        return;
644
    if (!p->code_bitmap && 
645
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
646
        is_cpu_write_access) {
647
        /* build code bitmap */
648
        build_page_bitmap(p);
649
    }
650

    
651
    /* we remove all the TBs in the range [start, end[ */
652
    /* XXX: see if in some cases it could be faster to invalidate all the code */
653
    current_tb_not_found = is_cpu_write_access;
654
    current_tb_modified = 0;
655
    current_tb = NULL; /* avoid warning */
656
    current_pc = 0; /* avoid warning */
657
    current_cs_base = 0; /* avoid warning */
658
    current_flags = 0; /* avoid warning */
659
    tb = p->first_tb;
660
    while (tb != NULL) {
661
        n = (long)tb & 3;
662
        tb = (TranslationBlock *)((long)tb & ~3);
663
        tb_next = tb->page_next[n];
664
        /* NOTE: this is subtle as a TB may span two physical pages */
665
        if (n == 0) {
666
            /* NOTE: tb_end may be after the end of the page, but
667
               it is not a problem */
668
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
669
            tb_end = tb_start + tb->size;
670
        } else {
671
            tb_start = tb->page_addr[1];
672
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
673
        }
674
        if (!(tb_end <= start || tb_start >= end)) {
675
#ifdef TARGET_HAS_PRECISE_SMC
676
            if (current_tb_not_found) {
677
                current_tb_not_found = 0;
678
                current_tb = NULL;
679
                if (env->mem_write_pc) {
680
                    /* now we have a real cpu fault */
681
                    current_tb = tb_find_pc(env->mem_write_pc);
682
                }
683
            }
684
            if (current_tb == tb &&
685
                !(current_tb->cflags & CF_SINGLE_INSN)) {
686
                /* If we are modifying the current TB, we must stop
687
                its execution. We could be more precise by checking
688
                that the modification is after the current PC, but it
689
                would require a specialized function to partially
690
                restore the CPU state */
691
                
692
                current_tb_modified = 1;
693
                cpu_restore_state(current_tb, env, 
694
                                  env->mem_write_pc, NULL);
695
#if defined(TARGET_I386)
696
                current_flags = env->hflags;
697
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
698
                current_cs_base = (target_ulong)env->segs[R_CS].base;
699
                current_pc = current_cs_base + env->eip;
700
#else
701
#error unsupported CPU
702
#endif
703
            }
704
#endif /* TARGET_HAS_PRECISE_SMC */
705
            /* we need to do that to handle the case where a signal
706
               occurs while doing tb_phys_invalidate() */
707
            saved_tb = NULL;
708
            if (env) {
709
                saved_tb = env->current_tb;
710
                env->current_tb = NULL;
711
            }
712
            tb_phys_invalidate(tb, -1);
713
            if (env) {
714
                env->current_tb = saved_tb;
715
                if (env->interrupt_request && env->current_tb)
716
                    cpu_interrupt(env, env->interrupt_request);
717
            }
718
        }
719
        tb = tb_next;
720
    }
721
#if !defined(CONFIG_USER_ONLY)
722
    /* if no code remaining, no need to continue to use slow writes */
723
    if (!p->first_tb) {
724
        invalidate_page_bitmap(p);
725
        if (is_cpu_write_access) {
726
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
727
        }
728
    }
729
#endif
730
#ifdef TARGET_HAS_PRECISE_SMC
731
    if (current_tb_modified) {
732
        /* we generate a block containing just the instruction
733
           modifying the memory. It will ensure that it cannot modify
734
           itself */
735
        env->current_tb = NULL;
736
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
737
                    CF_SINGLE_INSN);
738
        cpu_resume_from_signal(env, NULL);
739
    }
740
#endif
741
}
742

    
743
/* len must be <= 8 and start must be a multiple of len */
744
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
745
{
746
    PageDesc *p;
747
    int offset, b;
748
#if 0
749
    if (1) {
750
        if (loglevel) {
751
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n", 
752
                   cpu_single_env->mem_write_vaddr, len, 
753
                   cpu_single_env->eip, 
754
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
755
        }
756
    }
757
#endif
758
    p = page_find(start >> TARGET_PAGE_BITS);
759
    if (!p) 
760
        return;
761
    if (p->code_bitmap) {
762
        offset = start & ~TARGET_PAGE_MASK;
763
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
764
        if (b & ((1 << len) - 1))
765
            goto do_invalidate;
766
    } else {
767
    do_invalidate:
768
        tb_invalidate_phys_page_range(start, start + len, 1);
769
    }
770
}
771

    
772
#if !defined(CONFIG_SOFTMMU)
773
static void tb_invalidate_phys_page(target_ulong addr, 
774
                                    unsigned long pc, void *puc)
775
{
776
    int n, current_flags, current_tb_modified;
777
    target_ulong current_pc, current_cs_base;
778
    PageDesc *p;
779
    TranslationBlock *tb, *current_tb;
780
#ifdef TARGET_HAS_PRECISE_SMC
781
    CPUState *env = cpu_single_env;
782
#endif
783

    
784
    addr &= TARGET_PAGE_MASK;
785
    p = page_find(addr >> TARGET_PAGE_BITS);
786
    if (!p) 
787
        return;
788
    tb = p->first_tb;
789
    current_tb_modified = 0;
790
    current_tb = NULL;
791
    current_pc = 0; /* avoid warning */
792
    current_cs_base = 0; /* avoid warning */
793
    current_flags = 0; /* avoid warning */
794
#ifdef TARGET_HAS_PRECISE_SMC
795
    if (tb && pc != 0) {
796
        current_tb = tb_find_pc(pc);
797
    }
798
#endif
799
    while (tb != NULL) {
800
        n = (long)tb & 3;
801
        tb = (TranslationBlock *)((long)tb & ~3);
802
#ifdef TARGET_HAS_PRECISE_SMC
803
        if (current_tb == tb &&
804
            !(current_tb->cflags & CF_SINGLE_INSN)) {
805
                /* If we are modifying the current TB, we must stop
806
                   its execution. We could be more precise by checking
807
                   that the modification is after the current PC, but it
808
                   would require a specialized function to partially
809
                   restore the CPU state */
810
            
811
            current_tb_modified = 1;
812
            cpu_restore_state(current_tb, env, pc, puc);
813
#if defined(TARGET_I386)
814
            current_flags = env->hflags;
815
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
816
            current_cs_base = (target_ulong)env->segs[R_CS].base;
817
            current_pc = current_cs_base + env->eip;
818
#else
819
#error unsupported CPU
820
#endif
821
        }
822
#endif /* TARGET_HAS_PRECISE_SMC */
823
        tb_phys_invalidate(tb, addr);
824
        tb = tb->page_next[n];
825
    }
826
    p->first_tb = NULL;
827
#ifdef TARGET_HAS_PRECISE_SMC
828
    if (current_tb_modified) {
829
        /* we generate a block containing just the instruction
830
           modifying the memory. It will ensure that it cannot modify
831
           itself */
832
        env->current_tb = NULL;
833
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 
834
                    CF_SINGLE_INSN);
835
        cpu_resume_from_signal(env, puc);
836
    }
837
#endif
838
}
839
#endif
840

    
841
/* add the tb in the target page and protect it if necessary */
842
static inline void tb_alloc_page(TranslationBlock *tb, 
843
                                 unsigned int n, target_ulong page_addr)
844
{
845
    PageDesc *p;
846
    TranslationBlock *last_first_tb;
847

    
848
    tb->page_addr[n] = page_addr;
849
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
850
    tb->page_next[n] = p->first_tb;
851
    last_first_tb = p->first_tb;
852
    p->first_tb = (TranslationBlock *)((long)tb | n);
853
    invalidate_page_bitmap(p);
854

    
855
#if defined(TARGET_HAS_SMC) || 1
856

    
857
#if defined(CONFIG_USER_ONLY)
858
    if (p->flags & PAGE_WRITE) {
859
        target_ulong addr;
860
        PageDesc *p2;
861
        int prot;
862

    
863
        /* force the host page as non writable (writes will have a
864
           page fault + mprotect overhead) */
865
        page_addr &= qemu_host_page_mask;
866
        prot = 0;
867
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
868
            addr += TARGET_PAGE_SIZE) {
869

    
870
            p2 = page_find (addr >> TARGET_PAGE_BITS);
871
            if (!p2)
872
                continue;
873
            prot |= p2->flags;
874
            p2->flags &= ~PAGE_WRITE;
875
            page_get_flags(addr);
876
          }
877
        mprotect(g2h(page_addr), qemu_host_page_size, 
878
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
879
#ifdef DEBUG_TB_INVALIDATE
880
        printf("protecting code page: 0x%08lx\n", 
881
               page_addr);
882
#endif
883
    }
884
#else
885
    /* if some code is already present, then the pages are already
886
       protected. So we handle the case where only the first TB is
887
       allocated in a physical page */
888
    if (!last_first_tb) {
889
        tlb_protect_code(page_addr);
890
    }
891
#endif
892

    
893
#endif /* TARGET_HAS_SMC */
894
}
895

    
896
/* Allocate a new translation block. Returns NULL (so that the caller can
   flush the translation buffer) if there are too many translation blocks
   or too much generated code. */
898
TranslationBlock *tb_alloc(target_ulong pc)
899
{
900
    TranslationBlock *tb;
901

    
902
    if (nb_tbs >= CODE_GEN_MAX_BLOCKS || 
903
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
904
        return NULL;
905
    tb = &tbs[nb_tbs++];
906
    tb->pc = pc;
907
    tb->cflags = 0;
908
    return tb;
909
}
910

    
911
/* add a new TB and link it to the physical page tables. phys_page2 is
912
   (-1) to indicate that only one page contains the TB. */
913
void tb_link_phys(TranslationBlock *tb, 
914
                  target_ulong phys_pc, target_ulong phys_page2)
915
{
916
    unsigned int h;
917
    TranslationBlock **ptb;
918

    
919
    /* add in the physical hash table */
920
    h = tb_phys_hash_func(phys_pc);
921
    ptb = &tb_phys_hash[h];
922
    tb->phys_hash_next = *ptb;
923
    *ptb = tb;
924

    
925
    /* add in the page list */
926
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
927
    if (phys_page2 != -1)
928
        tb_alloc_page(tb, 1, phys_page2);
929
    else
930
        tb->page_addr[1] = -1;
931

    
932
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
933
    tb->jmp_next[0] = NULL;
934
    tb->jmp_next[1] = NULL;
935
#ifdef USE_CODE_COPY
936
    tb->cflags &= ~CF_FP_USED;
937
    if (tb->cflags & CF_TB_FP_USED)
938
        tb->cflags |= CF_FP_USED;
939
#endif
940

    
941
    /* init original jump addresses */
942
    if (tb->tb_next_offset[0] != 0xffff)
943
        tb_reset_jump(tb, 0);
944
    if (tb->tb_next_offset[1] != 0xffff)
945
        tb_reset_jump(tb, 1);
946

    
947
#ifdef DEBUG_TB_CHECK
948
    tb_page_check();
949
#endif
950
}
951

    
952
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
953
   tb[1].tc_ptr. Return NULL if not found */
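/* (TBs are allocated sequentially from code_gen_buffer, so the tbs[] array
   is sorted by tc_ptr and a binary search is sufficient.) */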
954
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
955
{
956
    int m_min, m_max, m;
957
    unsigned long v;
958
    TranslationBlock *tb;
959

    
960
    if (nb_tbs <= 0)
961
        return NULL;
962
    if (tc_ptr < (unsigned long)code_gen_buffer ||
963
        tc_ptr >= (unsigned long)code_gen_ptr)
964
        return NULL;
965
    /* binary search (cf Knuth) */
966
    m_min = 0;
967
    m_max = nb_tbs - 1;
968
    while (m_min <= m_max) {
969
        m = (m_min + m_max) >> 1;
970
        tb = &tbs[m];
971
        v = (unsigned long)tb->tc_ptr;
972
        if (v == tc_ptr)
973
            return tb;
974
        else if (tc_ptr < v) {
975
            m_max = m - 1;
976
        } else {
977
            m_min = m + 1;
978
        }
979
    } 
980
    return &tbs[m_max];
981
}
982

    
983
static void tb_reset_jump_recursive(TranslationBlock *tb);
984

    
985
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
986
{
987
    TranslationBlock *tb1, *tb_next, **ptb;
988
    unsigned int n1;
989

    
990
    tb1 = tb->jmp_next[n];
991
    if (tb1 != NULL) {
992
        /* find head of list */
993
        for(;;) {
994
            n1 = (long)tb1 & 3;
995
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
996
            if (n1 == 2)
997
                break;
998
            tb1 = tb1->jmp_next[n1];
999
        }
1000
        /* we are now sure that tb jumps to tb1 */
1001
        tb_next = tb1;
1002

    
1003
        /* remove tb from the jmp_first list */
1004
        ptb = &tb_next->jmp_first;
1005
        for(;;) {
1006
            tb1 = *ptb;
1007
            n1 = (long)tb1 & 3;
1008
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1009
            if (n1 == n && tb1 == tb)
1010
                break;
1011
            ptb = &tb1->jmp_next[n1];
1012
        }
1013
        *ptb = tb->jmp_next[n];
1014
        tb->jmp_next[n] = NULL;
1015
        
1016
        /* suppress the jump to next tb in generated code */
1017
        tb_reset_jump(tb, n);
1018

    
1019
        /* suppress jumps in the tb on which we could have jumped */
1020
        tb_reset_jump_recursive(tb_next);
1021
    }
1022
}
1023

    
1024
static void tb_reset_jump_recursive(TranslationBlock *tb)
1025
{
1026
    tb_reset_jump_recursive2(tb, 0);
1027
    tb_reset_jump_recursive2(tb, 1);
1028
}
1029

    
1030
#if defined(TARGET_HAS_ICE)
1031
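/* invalidate the TBs covering the breakpoint address so that the next
   translation of that code can take the breakpoint into account */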
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1032
{
1033
    target_ulong addr, pd;
1034
    ram_addr_t ram_addr;
1035
    PhysPageDesc *p;
1036

    
1037
    addr = cpu_get_phys_page_debug(env, pc);
1038
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1039
    if (!p) {
1040
        pd = IO_MEM_UNASSIGNED;
1041
    } else {
1042
        pd = p->phys_offset;
1043
    }
1044
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1045
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1046
}
1047
#endif
1048

    
1049
/* Add a watchpoint.  */
1050
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1051
{
1052
    int i;
1053

    
1054
    for (i = 0; i < env->nb_watchpoints; i++) {
1055
        if (addr == env->watchpoint[i].vaddr)
1056
            return 0;
1057
    }
1058
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1059
        return -1;
1060

    
1061
    i = env->nb_watchpoints++;
1062
    env->watchpoint[i].vaddr = addr;
1063
    tlb_flush_page(env, addr);
1064
    /* FIXME: This flush is needed because of the hack to make memory ops
1065
       terminate the TB.  It can be removed once the proper IO trap and
1066
       re-execute bits are in.  */
1067
    tb_flush(env);
1068
    return i;
1069
}
1070

    
1071
/* Remove a watchpoint.  */
1072
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1073
{
1074
    int i;
1075

    
1076
    for (i = 0; i < env->nb_watchpoints; i++) {
1077
        if (addr == env->watchpoint[i].vaddr) {
1078
            env->nb_watchpoints--;
1079
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1080
            tlb_flush_page(env, addr);
1081
            return 0;
1082
        }
1083
    }
1084
    return -1;
1085
}
1086

    
1087
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1088
   breakpoint is reached */
1089
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1090
{
1091
#if defined(TARGET_HAS_ICE)
1092
    int i;
1093
    
1094
    for(i = 0; i < env->nb_breakpoints; i++) {
1095
        if (env->breakpoints[i] == pc)
1096
            return 0;
1097
    }
1098

    
1099
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1100
        return -1;
1101
    env->breakpoints[env->nb_breakpoints++] = pc;
1102
    
1103
    breakpoint_invalidate(env, pc);
1104
    return 0;
1105
#else
1106
    return -1;
1107
#endif
1108
}
1109

    
1110
/* remove a breakpoint */
1111
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1112
{
1113
#if defined(TARGET_HAS_ICE)
1114
    int i;
1115
    for(i = 0; i < env->nb_breakpoints; i++) {
1116
        if (env->breakpoints[i] == pc)
1117
            goto found;
1118
    }
1119
    return -1;
1120
 found:
1121
    env->nb_breakpoints--;
1122
    if (i < env->nb_breakpoints)
1123
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1124

    
1125
    breakpoint_invalidate(env, pc);
1126
    return 0;
1127
#else
1128
    return -1;
1129
#endif
1130
}
1131

    
1132
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1133
   CPU loop after each instruction */
1134
void cpu_single_step(CPUState *env, int enabled)
1135
{
1136
#if defined(TARGET_HAS_ICE)
1137
    if (env->singlestep_enabled != enabled) {
1138
        env->singlestep_enabled = enabled;
1139
        /* must flush all the translated code to avoid inconsistencies */
1140
        /* XXX: only flush what is necessary */
1141
        tb_flush(env);
1142
    }
1143
#endif
1144
}
1145

    
1146
/* enable or disable low levels log */
1147
void cpu_set_log(int log_flags)
1148
{
1149
    loglevel = log_flags;
1150
    if (loglevel && !logfile) {
1151
        logfile = fopen(logfilename, "w");
1152
        if (!logfile) {
1153
            perror(logfilename);
1154
            _exit(1);
1155
        }
1156
#if !defined(CONFIG_SOFTMMU)
1157
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1158
        {
1159
            static uint8_t logfile_buf[4096];
1160
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1161
        }
1162
#else
1163
        setvbuf(logfile, NULL, _IOLBF, 0);
1164
#endif
1165
    }
1166
}
1167

    
1168
void cpu_set_log_filename(const char *filename)
1169
{
1170
    logfilename = strdup(filename);
1171
}
1172

    
1173
/* mask must never be zero, except for A20 change call */
1174
void cpu_interrupt(CPUState *env, int mask)
1175
{
1176
    TranslationBlock *tb;
1177
    static int interrupt_lock;
1178

    
1179
    env->interrupt_request |= mask;
1180
    /* if the cpu is currently executing code, we must unlink it and
1181
       all the potentially executing TB */
1182
    tb = env->current_tb;
1183
    if (tb && !testandset(&interrupt_lock)) {
1184
        env->current_tb = NULL;
1185
        tb_reset_jump_recursive(tb);
1186
        interrupt_lock = 0;
1187
    }
1188
}
1189

    
1190
void cpu_reset_interrupt(CPUState *env, int mask)
1191
{
1192
    env->interrupt_request &= ~mask;
1193
}
1194

    
1195
CPULogItem cpu_log_items[] = {
1196
    { CPU_LOG_TB_OUT_ASM, "out_asm", 
1197
      "show generated host assembly code for each compiled TB" },
1198
    { CPU_LOG_TB_IN_ASM, "in_asm",
1199
      "show target assembly code for each compiled TB" },
1200
    { CPU_LOG_TB_OP, "op", 
1201
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
1202
#ifdef TARGET_I386
1203
    { CPU_LOG_TB_OP_OPT, "op_opt",
1204
      "show micro ops after optimization for each compiled TB" },
1205
#endif
1206
    { CPU_LOG_INT, "int",
1207
      "show interrupts/exceptions in short format" },
1208
    { CPU_LOG_EXEC, "exec",
1209
      "show trace before each executed TB (lots of logs)" },
1210
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
1212
#ifdef TARGET_I386
1213
    { CPU_LOG_PCALL, "pcall",
1214
      "show protected mode far calls/returns/exceptions" },
1215
#endif
1216
#ifdef DEBUG_IOPORT
1217
    { CPU_LOG_IOPORT, "ioport",
1218
      "show all i/o ports accesses" },
1219
#endif
1220
    { 0, NULL, NULL },
1221
};
1222

    
1223
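/* return true if the first 'n' characters of 's1' exactly match the
   NUL-terminated string 's2' (used to match tokens of the comma list) */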
static int cmp1(const char *s1, int n, const char *s2)
1224
{
1225
    if (strlen(s2) != n)
1226
        return 0;
1227
    return memcmp(s1, s2, n) == 0;
1228
}
1229
      
1230
/* takes a comma separated list of log masks. Return 0 if error. */
1231
int cpu_str_to_log_mask(const char *str)
1232
{
1233
    CPULogItem *item;
1234
    int mask;
1235
    const char *p, *p1;
1236

    
1237
    p = str;
1238
    mask = 0;
1239
    for(;;) {
1240
        p1 = strchr(p, ',');
1241
        if (!p1)
1242
            p1 = p + strlen(p);
1243
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
1254
    found:
1255
        mask |= item->mask;
1256
        if (*p1 != ',')
1257
            break;
1258
        p = p1 + 1;
1259
    }
1260
    return mask;
1261
}
1262

    
1263
void cpu_abort(CPUState *env, const char *fmt, ...)
1264
{
1265
    va_list ap;
1266

    
1267
    va_start(ap, fmt);
1268
    fprintf(stderr, "qemu: fatal: ");
1269
    vfprintf(stderr, fmt, ap);
1270
    fprintf(stderr, "\n");
1271
#ifdef TARGET_I386
1272
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1273
#else
1274
    cpu_dump_state(env, stderr, fprintf, 0);
1275
#endif
1276
    va_end(ap);
1277
    abort();
1278
}
1279

    
1280
CPUState *cpu_copy(CPUState *env)
1281
{
1282
    CPUState *new_env = cpu_init();
1283
    /* preserve chaining and index */
1284
    CPUState *next_cpu = new_env->next_cpu;
1285
    int cpu_index = new_env->cpu_index;
1286
    memcpy(new_env, env, sizeof(CPUState));
1287
    new_env->next_cpu = next_cpu;
1288
    new_env->cpu_index = cpu_index;
1289
    return new_env;
1290
}
1291

    
1292
#if !defined(CONFIG_USER_ONLY)
1293

    
1294
/* NOTE: if flush_global is true, also flush global entries (not
1295
   implemented yet) */
1296
void tlb_flush(CPUState *env, int flush_global)
1297
{
1298
    int i;
1299

    
1300
#if defined(DEBUG_TLB)
1301
    printf("tlb_flush:\n");
1302
#endif
1303
    /* must reset current TB so that interrupts cannot modify the
1304
       links while we are modifying them */
1305
    env->current_tb = NULL;
1306

    
1307
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1308
        env->tlb_table[0][i].addr_read = -1;
1309
        env->tlb_table[0][i].addr_write = -1;
1310
        env->tlb_table[0][i].addr_code = -1;
1311
        env->tlb_table[1][i].addr_read = -1;
1312
        env->tlb_table[1][i].addr_write = -1;
1313
        env->tlb_table[1][i].addr_code = -1;
1314
#if (NB_MMU_MODES >= 3)
1315
        env->tlb_table[2][i].addr_read = -1;
1316
        env->tlb_table[2][i].addr_write = -1;
1317
        env->tlb_table[2][i].addr_code = -1;
1318
#if (NB_MMU_MODES == 4)
1319
        env->tlb_table[3][i].addr_read = -1;
1320
        env->tlb_table[3][i].addr_write = -1;
1321
        env->tlb_table[3][i].addr_code = -1;
1322
#endif
1323
#endif
1324
    }
1325

    
1326
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1327

    
1328
#if !defined(CONFIG_SOFTMMU)
1329
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1330
#endif
1331
#ifdef USE_KQEMU
1332
    if (env->kqemu_enabled) {
1333
        kqemu_flush(env, flush_global);
1334
    }
1335
#endif
1336
    tlb_flush_count++;
1337
}
1338

    
1339
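/* invalidate a single TLB entry, but only if it actually maps the given
   (page-aligned) virtual address for read, write or code fetch */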
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1340
{
1341
    if (addr == (tlb_entry->addr_read & 
1342
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1343
        addr == (tlb_entry->addr_write & 
1344
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1345
        addr == (tlb_entry->addr_code & 
1346
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1347
        tlb_entry->addr_read = -1;
1348
        tlb_entry->addr_write = -1;
1349
        tlb_entry->addr_code = -1;
1350
    }
1351
}
1352

    
1353
void tlb_flush_page(CPUState *env, target_ulong addr)
1354
{
1355
    int i;
1356
    TranslationBlock *tb;
1357

    
1358
#if defined(DEBUG_TLB)
1359
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1360
#endif
1361
    /* must reset current TB so that interrupts cannot modify the
1362
       links while we are modifying them */
1363
    env->current_tb = NULL;
1364

    
1365
    addr &= TARGET_PAGE_MASK;
1366
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1367
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1368
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1369
#if (NB_MMU_MODES >= 3)
1370
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1371
#if (NB_MMU_MODES == 4)
1372
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1373
#endif
1374
#endif
1375

    
1376
    /* Discard jump cache entries for any tb which might potentially
1377
       overlap the flushed page.  */
1378
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1379
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1380

    
1381
    i = tb_jmp_cache_hash_page(addr);
1382
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
1383

    
1384
#if !defined(CONFIG_SOFTMMU)
1385
    if (addr < MMAP_AREA_END)
1386
        munmap((void *)addr, TARGET_PAGE_SIZE);
1387
#endif
1388
#ifdef USE_KQEMU
1389
    if (env->kqemu_enabled) {
1390
        kqemu_flush_page(env, addr);
1391
    }
1392
#endif
1393
}
1394

    
1395
/* update the TLBs so that writes to code in the virtual page 'addr'
1396
   can be detected */
1397
static void tlb_protect_code(ram_addr_t ram_addr)
1398
{
1399
    cpu_physical_memory_reset_dirty(ram_addr, 
1400
                                    ram_addr + TARGET_PAGE_SIZE,
1401
                                    CODE_DIRTY_FLAG);
1402
}
1403

    
1404
/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self-modifying code */
1406
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr, 
1407
                                    target_ulong vaddr)
1408
{
1409
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1410
}
1411

    
1412
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, 
1413
                                         unsigned long start, unsigned long length)
1414
{
1415
    unsigned long addr;
1416
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1417
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1418
        if ((addr - start) < length) {
1419
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1420
        }
1421
    }
1422
}
1423

    
1424
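/* clear the given dirty flags for a physical address range and update
   every CPU's TLB so that the next write to these pages sets the dirty
   bits again (such writes go through IO_MEM_NOTDIRTY) */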
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1425
                                     int dirty_flags)
1426
{
1427
    CPUState *env;
1428
    unsigned long length, start1;
1429
    int i, mask, len;
1430
    uint8_t *p;
1431

    
1432
    start &= TARGET_PAGE_MASK;
1433
    end = TARGET_PAGE_ALIGN(end);
1434

    
1435
    length = end - start;
1436
    if (length == 0)
1437
        return;
1438
    len = length >> TARGET_PAGE_BITS;
1439
#ifdef USE_KQEMU
1440
    /* XXX: should not depend on cpu context */
1441
    env = first_cpu;
1442
    if (env->kqemu_enabled) {
1443
        ram_addr_t addr;
1444
        addr = start;
1445
        for(i = 0; i < len; i++) {
1446
            kqemu_set_notdirty(env, addr);
1447
            addr += TARGET_PAGE_SIZE;
1448
        }
1449
    }
1450
#endif
1451
    mask = ~dirty_flags;
1452
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1453
    for(i = 0; i < len; i++)
1454
        p[i] &= mask;
1455

    
1456
    /* we modify the TLB cache so that the dirty bit will be set again
1457
       when accessing the range */
1458
    start1 = start + (unsigned long)phys_ram_base;
1459
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1460
        for(i = 0; i < CPU_TLB_SIZE; i++)
1461
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1462
        for(i = 0; i < CPU_TLB_SIZE; i++)
1463
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1464
#if (NB_MMU_MODES >= 3)
1465
        for(i = 0; i < CPU_TLB_SIZE; i++)
1466
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1467
#if (NB_MMU_MODES == 4)
1468
        for(i = 0; i < CPU_TLB_SIZE; i++)
1469
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1470
#endif
1471
#endif
1472
    }
1473

    
1474
#if !defined(CONFIG_SOFTMMU)
1475
    /* XXX: this is expensive */
1476
    {
1477
        VirtPageDesc *p;
1478
        int j;
1479
        target_ulong addr;
1480

    
1481
        for(i = 0; i < L1_SIZE; i++) {
1482
            p = l1_virt_map[i];
1483
            if (p) {
1484
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1485
                for(j = 0; j < L2_SIZE; j++) {
1486
                    if (p->valid_tag == virt_valid_tag &&
1487
                        p->phys_addr >= start && p->phys_addr < end &&
1488
                        (p->prot & PROT_WRITE)) {
1489
                        if (addr < MMAP_AREA_END) {
1490
                            mprotect((void *)addr, TARGET_PAGE_SIZE, 
1491
                                     p->prot & ~PROT_WRITE);
1492
                        }
1493
                    }
1494
                    addr += TARGET_PAGE_SIZE;
1495
                    p++;
1496
                }
1497
            }
1498
        }
1499
    }
1500
#endif
1501
}
1502

    
1503
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1504
{
1505
    ram_addr_t ram_addr;
1506

    
1507
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1508
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + 
1509
            tlb_entry->addend - (unsigned long)phys_ram_base;
1510
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1511
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1512
        }
1513
    }
1514
}
1515

    
1516
/* update the TLB according to the current state of the dirty bits */
1517
void cpu_tlb_update_dirty(CPUState *env)
1518
{
1519
    int i;
1520
    for(i = 0; i < CPU_TLB_SIZE; i++)
1521
        tlb_update_dirty(&env->tlb_table[0][i]);
1522
    for(i = 0; i < CPU_TLB_SIZE; i++)
1523
        tlb_update_dirty(&env->tlb_table[1][i]);
1524
#if (NB_MMU_MODES >= 3)
1525
    for(i = 0; i < CPU_TLB_SIZE; i++)
1526
        tlb_update_dirty(&env->tlb_table[2][i]);
1527
#if (NB_MMU_MODES == 4)
1528
    for(i = 0; i < CPU_TLB_SIZE; i++)
1529
        tlb_update_dirty(&env->tlb_table[3][i]);
1530
#endif
1531
#endif
1532
}
1533

    
1534
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1535
                                  unsigned long start)
1536
{
1537
    unsigned long addr;
1538
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1539
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1540
        if (addr == start) {
1541
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1542
        }
1543
    }
1544
}
1545

    
1546
/* update the TLB corresponding to virtual page vaddr and phys addr
1547
   addr so that it is no longer dirty */
1548
static inline void tlb_set_dirty(CPUState *env,
1549
                                 unsigned long addr, target_ulong vaddr)
1550
{
1551
    int i;
1552

    
1553
    addr &= TARGET_PAGE_MASK;
1554
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1555
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1556
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1557
#if (NB_MMU_MODES >= 3)
1558
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1559
#if (NB_MMU_MODES == 4)
1560
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1561
#endif
1562
#endif
1563
}
1564

    
1565
/* add a new TLB entry. At most one entry for a given virtual address
1566
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1567
   (can only happen in non SOFTMMU mode for I/O pages or pages
1568
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
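
/* Illustrative sketch: the fields filled in above are consumed by the softmmu
   fast path (softmmu_header.h / softmmu_template.h), roughly as

       CPUTLBEntry *e = &env->tlb_table[is_user][(vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)];
       if ((vaddr & TARGET_PAGE_MASK) == (e->addr_read & TARGET_PAGE_MASK))
           val = ldl_raw((uint8_t *)(long)(vaddr + e->addend));

   For RAM pages the addend turns a guest virtual address into a host pointer
   into phys_ram_base; for I/O pages (addend = paddr - vaddr) it recovers the
   guest physical address that is handed to the io_mem handlers. */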

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
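
/* Example (illustrative, not produced by this file itself): with the format
   strings above, page_dump() emits output along the lines of

       start    end      size     prot
       00400000-00452000 00052000 r-x
       00452000-00454000 00002000 rw-

   one line per run of pages sharing the same PAGE_READ/PAGE_WRITE/PAGE_EXEC
   flags. */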

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if a write-protected page becomes writable again, we must
           invalidate the translated code it contains */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
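
/* Usage note: in user mode emulation this is the hook the target mmap() and
   mprotect() emulation uses to keep the PAGE_* flags in sync with what the
   guest asked for, e.g. a hypothetical caller might do

       page_set_flags(start, start + len, PAGE_READ | PAGE_WRITE | PAGE_VALID);
*/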

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
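
/* Illustrative usage (hypothetical machine init code, not part of this file):
   boards typically carve guest RAM out of the preallocated phys_ram buffer
   and then map it at a guest physical address, e.g.

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size, ram_offset | IO_MEM_RAM);

   The low bits of phys_offset (IO_MEM_RAM, IO_MEM_ROM, or a token returned by
   cpu_register_io_memory()) select how the region is accessed. */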

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
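
/* Summary: a RAM page whose CODE_DIRTY_FLAG is clear is entered in the TLB as
   IO_MEM_NOTDIRTY by tlb_set_page_exec(), so its first write is routed
   through the handlers above; they invalidate any translated code on the
   page, perform the store, mark the page dirty and, once dirty_flags reaches
   0xff, call tlb_set_dirty() so later writes go straight to RAM. */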

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is greater than
   zero, the corresponding io zone is modified. If it is zero (or
   negative), a new io zone is allocated. The return value can be used
   with cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
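
/* Illustrative device-side usage (hypothetical names, not part of this file):
   a device model usually registers its accessors and then maps the returned
   token, e.g.

       int iomemtype;
       iomemtype = cpu_register_io_memory(0, my_dev_readfn, my_dev_writefn, s);
       cpu_register_physical_memory(base, 0x00001000, iomemtype);

   where my_dev_readfn and my_dev_writefn are the byte, word and long accessor
   tables and s is the opaque device state passed back to them. */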

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
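
/* Note: most callers use the cpu_physical_memory_read() and
   cpu_physical_memory_write() helpers, which simply call
   cpu_physical_memory_rw() with is_write set to 0 or 1; several of the
   unoptimized accessors below (ldub_phys(), stw_phys(), ...) are built on
   top of them. */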

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
2548

    
2549
/* XXX: optimize */
2550
void stb_phys(target_phys_addr_t addr, uint32_t val)
2551
{
2552
    uint8_t v = val;
2553
    cpu_physical_memory_write(addr, &v, 1);
2554
}
2555

    
2556
/* XXX: optimize */
2557
void stw_phys(target_phys_addr_t addr, uint32_t val)
2558
{
2559
    uint16_t v = tswap16(val);
2560
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2561
}
2562

    
2563
/* XXX: optimize */
2564
void stq_phys(target_phys_addr_t addr, uint64_t val)
2565
{
2566
    val = tswap64(val);
2567
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2568
}
2569

    
2570
#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
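
/* Note: this is the debugger path, used e.g. by the gdb stub and the monitor
   to read or patch guest memory by virtual address; an illustrative call is

       cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0);

   It translates each page with cpu_get_phys_page_debug() and then defers to
   cpu_physical_memory_rw(). */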

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}
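
/* Note: this report is typically exposed through the monitor's "info jit"
   command; the tb_flush_count, tb_phys_invalidate_count and tlb_flush_count
   counters it prints are maintained elsewhere in this file. */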
2643

    
2644
#if !defined(CONFIG_USER_ONLY) 
2645

    
2646
#define MMUSUFFIX _cmmu
2647
#define GETPC() NULL
2648
#define env cpu_single_env
2649
#define SOFTMMU_CODE_ACCESS
2650

    
2651
#define SHIFT 0
2652
#include "softmmu_template.h"
2653

    
2654
#define SHIFT 1
2655
#include "softmmu_template.h"
2656

    
2657
#define SHIFT 2
2658
#include "softmmu_template.h"
2659

    
2660
#define SHIFT 3
2661
#include "softmmu_template.h"
2662

    
2663
#undef env
2664

    
2665
#endif