/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of code write accesses to a given page and, past a threshold,
       use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

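/* Illustrative sketch, not part of the original file: how page_init()
   derives qemu_host_page_bits and qemu_host_page_mask from a power-of-two
   page size. For a 4096-byte page this yields bits = 12 and mask = ~0xfff,
   so (addr & mask) rounds an address down to a page boundary. The demo_*
   helpers are hypothetical names used only for illustration. */
static unsigned long demo_page_bits(unsigned long page_size)
{
    unsigned long bits = 0;
    /* smallest bits such that (1 << bits) >= page_size */
    while ((1UL << bits) < page_size)
        bits++;
    return bits;
}

static unsigned long demo_page_round_down(unsigned long addr,
                                          unsigned long page_size)
{
    /* ~(page_size - 1) clears the offset bits, like qemu_host_page_mask */
    return addr & ~(page_size - 1);
}
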
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

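/* Illustrative sketch, not part of the original file: the two-level split
   used by page_find_alloc(). The upper bits of a page index select an L1
   slot holding a lazily allocated array of L2_SIZE PageDescs, and the low
   L2_BITS bits select the entry inside it. The hypothetical helper below
   shows the same split on plain integers. */
static void demo_split_page_index(unsigned int index,
                                  unsigned int *l1_idx, unsigned int *l2_idx)
{
    *l1_idx = index >> L2_BITS;           /* which second-level array */
    *l2_idx = index & (L2_SIZE - 1);      /* which entry within it */
}
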
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

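/* Illustrative sketch, not part of the original file: cpu_exec_init()
   appends to the first_cpu list with a pointer-to-pointer walk, which needs
   no special case for the empty list. The same idiom on a minimal
   hypothetical node type: */
struct demo_node { struct demo_node *next; };

static void demo_list_append(struct demo_node **head, struct demo_node *n)
{
    struct demo_node **p = head;
    while (*p != NULL)              /* walk to the terminating NULL link */
        p = &(*p)->next;
    n->next = NULL;
    *p = n;                         /* works for empty and non-empty lists */
}
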
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

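/* Illustrative sketch, not part of the original file: the page and jump
   lists above store a small integer in the two low bits of each
   TranslationBlock pointer (0 or 1 = which page slot the link belongs to,
   2 = end-of-list marker). This works because TBs are at least 4-byte
   aligned. Hypothetical helpers showing the idiom on a generic pointer: */
static void *demo_tag_ptr(void *p, unsigned int tag)   /* tag in 0..3 */
{
    return (void *)((long)p | tag);
}

static unsigned int demo_ptr_tag(void *p)
{
    return (long)p & 3;                 /* recover the tag */
}

static void *demo_untag_ptr(void *p)
{
    return (void *)((long)p & ~3);      /* recover the aligned pointer */
}
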
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache of each CPU */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

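/* Illustrative sketch, not part of the original file: set_bits() marks the
   bit range [start, start+len) in a byte array, LSB-first within each byte,
   which is how build_page_bitmap() below records code bytes. A matching
   single-bit test, in the same style as tb_invalidate_phys_page_fast()
   further down (demo_* names are hypothetical): */
static int demo_test_bit(const uint8_t *tab, int bit)
{
    return (tab[bit >> 3] >> (bit & 7)) & 1;
}

static void demo_set_bits_usage(void)
{
    uint8_t bitmap[16];
    memset(bitmap, 0, sizeof(bitmap));
    set_bits(bitmap, 5, 13);            /* mark bits 5..17 */
    /* now demo_test_bit(bitmap, 5) == 1 and demo_test_bit(bitmap, 18) == 0 */
}
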
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   in the range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

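/* Illustrative sketch, not part of the original file: tb_alloc() is a bump
   allocator over the static tbs[] array; it never frees individual blocks,
   and the caller reacts to NULL by flushing everything and retrying (see
   tb_gen_code above). The same pattern in miniature, with hypothetical
   demo_* names: */
#define DEMO_MAX_OBJS 64
static int demo_nb_objs;
static int demo_objs[DEMO_MAX_OBJS];

static int *demo_bump_alloc(void)
{
    if (demo_nb_objs >= DEMO_MAX_OBJS)
        return NULL;                    /* caller must flush and retry */
    return &demo_objs[demo_nb_objs++];
}

static void demo_bump_flush(void)
{
    demo_nb_objs = 0;                   /* frees everything at once */
}
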
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

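/* Illustrative sketch, not part of the original file: tb_find_pc() is a
   binary search that, on a miss, exits the loop with m_max indexing the
   greatest element <= the key; since tc_ptr was already range-checked
   against the code buffer, that element is the TB containing tc_ptr. The
   same "floor" search over a sorted integer array (hypothetical helper): */
static int demo_floor_search(const unsigned long *tab, int n, unsigned long key)
{
    int lo = 0, hi = n - 1, m;
    while (lo <= hi) {
        m = (lo + hi) >> 1;
        if (tab[m] == key)
            return m;
        else if (key < tab[m])
            hi = m - 1;
        else
            lo = m + 1;
    }
    return hi;   /* greatest index with tab[hi] <= key; -1 if key < tab[0] */
}
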
static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong addr, pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

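/* Illustrative sketch, not part of the original file: cpu_watchpoint_remove()
   deletes from an unordered array in O(1) by overwriting the removed slot
   with the last element instead of shifting. Generic form (hypothetical
   helper): */
static int demo_array_remove(int *tab, int *count, int idx)
{
    if (idx < 0 || idx >= *count)
        return -1;
    (*count)--;
    tab[idx] = tab[*count];     /* order is not preserved, which is fine here */
    return 0;
}
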
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all I/O port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

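/* Illustrative sketch, not part of the original file: how
   cpu_str_to_log_mask() is typically driven, e.g. from a "-d in_asm,op"
   style command-line option. A zero return distinguishes "unknown item"
   from a valid mask. The helper name and context are hypothetical. */
static void demo_set_log_from_option(const char *arg)
{
    int mask = cpu_str_to_log_mask(arg);    /* e.g. arg = "in_asm,op" */
    if (mask == 0) {
        fprintf(stderr, "unknown log item in '%s'\n", arg);
        return;
    }
    cpu_set_log(mask);
}
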
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

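/* Illustrative sketch, not part of the original file: tlb_flush_entry()
   compares a page-aligned addr against each cached address masked with
   (TARGET_PAGE_MASK | TLB_INVALID_MASK). Keeping TLB_INVALID_MASK in the
   mask means an entry already marked invalid can never compare equal to a
   page-aligned address, so it is skipped. Generic predicate (hypothetical
   helper): */
static int demo_tlb_entry_matches(unsigned long entry_addr,
                                  unsigned long page_aligned_addr,
                                  unsigned long page_mask,
                                  unsigned long invalid_bit)
{
    /* "same page AND not marked invalid" in a single compare */
    return page_aligned_addr == (entry_addr & (page_mask | invalid_bit));
}
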
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

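/* Illustrative sketch, not part of the original file: phys_ram_dirty keeps
   one byte of dirty flags per target page, so clearing a flag over a range
   is an AND with the complement mask across consecutive bytes, as in
   cpu_physical_memory_reset_dirty() above. The core of that loop,
   standalone (hypothetical helper): */
static void demo_reset_dirty_flags(uint8_t *dirty_bytes,
                                   unsigned long first_page,
                                   unsigned long nb_pages,
                                   int dirty_flags)
{
    unsigned long i;
    int mask = ~dirty_flags;            /* clear only the requested flags */
    for (i = 0; i < nb_pages; i++)
        dirty_bytes[first_page + i] &= mask;
}
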
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

1554
/* add a new TLB entry. At most one entry for a given virtual address
1555
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1556
   (can only happen in non SOFTMMU mode for I/O pages or pages
1557
   conflicting with the host address space). */
1558
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
1559
                      target_phys_addr_t paddr, int prot, 
1560
                      int is_user, int is_softmmu)
1561
{
1562
    PhysPageDesc *p;
1563
    unsigned long pd;
1564
    unsigned int index;
1565
    target_ulong address;
1566
    target_phys_addr_t addend;
1567
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
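
/* Illustrative sketch (comment only, not compiled): how the softmmu fast
   path consumes the TLB entry filled in above.  Field names match this
   file; "guest_va" and "val" are hypothetical.

       index = (guest_va >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
       te = &env->tlb_table[is_user][index];
       if ((guest_va & TARGET_PAGE_MASK) == te->addr_write) {
           fast path: plain dirty RAM, store through the host pointer:
           *(uint8_t *)(guest_va + te->addend) = val;
       } else {
           slow path: addr_write carries IO_MEM_* flags in its low bits
           (MMIO, IO_MEM_NOTDIRTY, watchpoints), so the comparison fails
           and the out-of-line handler is called instead.
       }
*/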

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}
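
/* Illustrative usage (hypothetical caller): dump the current guest
   mappings to stderr, e.g. from a debugging hook:

       page_dump(stderr);

   Output is one "start end size prot" line per contiguous run of pages
   sharing the same protection flags. */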

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
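
/* Illustrative sketch: how a target mprotect() emulation might grant a
   region read/write access ("start"/"len" are hypothetical syscall
   arguments):

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   PAGE_WRITE_ORG is ORed in automatically (see above), so if the page is
   later write-protected to guard translated code, page_unprotect() can
   tell that the guest itself is still allowed to write and restore
   access. */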

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
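
/* Illustrative sketch (hypothetical host SIGSEGV handler): the kind of
   caller page_unprotect() expects.  Extracting the faulting address and
   PC from the signal context is host specific and elided here; the
   helper names are placeholders.

       static void sigsegv_handler(int sig, siginfo_t *info, void *puc)
       {
           unsigned long pc = host_pc_from_context(puc);
           if (page_unprotect((target_ulong)(long)info->si_addr, pc, puc))
               return;                   write-protection fault handled
           handle_real_guest_fault();    genuine guest fault
       }
*/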

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now that we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
            (phys_offset & IO_MEM_ROMD))
            phys_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
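
/* Illustrative usage (hypothetical board setup): allocate backing store
   for 16MB of guest RAM and map it at physical address 0:

       ram_addr_t ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
       cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                    ram_offset | IO_MEM_RAM);
*/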

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read  0x%08x\n", (int)addr);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write 0x%08x = 0x%x\n", (int)addr, val);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
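
/* The three helpers above close the self-modifying-code loop:
   tlb_set_page_exec() routes stores to clean RAM pages through
   IO_MEM_NOTDIRTY, the store invalidates any TBs on the page and sets
   the dirty bits, and once dirty_flags reaches 0xff tlb_set_dirty()
   downgrades the TLB entry back to a direct RAM write. */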

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}
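
/* Note that check_watchpoint() only records the hit and raises
   CPU_INTERRUPT_DEBUG: the access itself still completes through the
   ldX_phys/stX_phys pass-throughs below, and the debug exception is
   serviced afterwards by the main execution loop. */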

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
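
/* After io_mem_init() the first five io_mem indices stay reserved for
   the fixed IO_MEM_* handlers registered above (hence io_mem_nb = 5),
   so dynamic registrations through cpu_register_io_memory() start after
   them. */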

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is zero or negative, a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
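
/* Illustrative usage (hypothetical device): allocate a fresh MMIO zone
   and map one page of it.  "my_read"/"my_write" stand for the device's
   CPUReadMemoryFunc/CPUWriteMemoryFunc tables and "s" for its opaque
   state:

       int iomem = cpu_register_io_memory(0, my_read, my_write, s);
       cpu_register_physical_memory(0xfe000000, TARGET_PAGE_SIZE, iomem);
*/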

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* only lock and copy the chunk that fits in this page (l),
               not the whole remaining length */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
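
/* Most callers go through the cpu_physical_memory_read()/
   cpu_physical_memory_write() wrappers, which expand to this function
   with is_write set to 0 or 1, e.g.

       cpu_physical_memory_read(paddr, buf, sizeof(buf));
*/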

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
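
/* Illustrative usage (hypothetical target MMU helper): set the accessed
   bit of a guest PTE in place.  The _notdirty store deliberately skips
   dirty tracking and TB invalidation, since the page holds page tables
   rather than code ("pte_addr"/"PTE_ACCESSED" are placeholders):

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
*/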

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
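
/* Illustrative usage (hypothetical gdb-stub memory read): fetch guest
   virtual memory for a debugger, failing cleanly on unmapped pages:

       uint8_t buf[64];
       if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
           report_error_to_debugger();   placeholder error path
*/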

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}
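
/* dump_exec_info() is typically wired to a monitor command (e.g. "info
   jit"), with cpu_fprintf pointing at the monitor's printf-style
   output. */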

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
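
/* Each inclusion of softmmu_template.h above expands the code-access
   load helpers for one access size (SHIFT 0..3 -> 8/16/32/64 bit); the
   _cmmu suffix gives translation-time code fetches their own entry
   points, separate from the data-access helpers. */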

#undef env

#endif