/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;

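/* probe the host page size, make the static code generation buffer
   executable and allocate the level-1 physical page table */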
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

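/* return the PageDesc for a given target page index, allocating the
   level-2 table on first use */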
static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

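/* return the PhysPageDesc for a given physical page index; if 'alloc' is
   set, missing intermediate tables are allocated, otherwise NULL is
   returned for unmapped pages */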
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

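/* register a new virtual CPU: on the first call the code generator and the
   I/O memory handlers are initialized, then the CPU is appended to the
   global list and given its index */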
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

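/* unlink jump 'n' of 'tb' from the circular jump list of the TB it
   currently points to */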
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

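/* remove the TB from the physical hash table, the page lists and the
   per-CPU jump caches, and redirect any TB that chained to it */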
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

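/* mark the bits [start, start + len) as set in the bitmap 'tab' */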
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

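/* build a bitmap of the bytes of the page that are covered by translated
   code, so that small writes outside any TB can be detected cheaply */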
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

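/* generate a new TB for the code at 'pc' with the given CPU state flags,
   flushing the whole translation cache first if no TB can be allocated */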
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
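/* invalidate all TBs present in the physical page of 'addr'; called from
   the write fault path, 'pc' and 'puc' describe the faulting context so
   that the current TB can be aborted and regenerated if needed */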
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

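/* unlink jump 'n' of 'tb' from its destination and recursively reset the
   jumps of that destination TB */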
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

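/* return true if the zero-terminated string 's2' is exactly the first 'n'
   characters of 's1' */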
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

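/* duplicate the state of a CPU into a freshly initialized one, keeping the
   new CPU's place in the global list and its index */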
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

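/* invalidate a single TLB entry if it matches the given virtual address */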
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

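/* if the entry maps RAM inside [start, start + length), downgrade it to
   IO_MEM_NOTDIRTY so that the next write goes through the slow path */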
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

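/* clear the given dirty flags for the physical address range and adjust
   the TLBs of all CPUs so that further writes are trapped again */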
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

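/* if the page behind a RAM write entry is no longer dirty, mark the entry
   IO_MEM_NOTDIRTY so that the dirty flag is updated on the next write */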
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

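/* restore direct RAM access for a write entry that was marked not dirty */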
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, 
1547
                                  unsigned long start)
1548
{
1549
    unsigned long addr;
1550
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1551
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1552
        if (addr == start) {
1553
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1554
        }
1555
    }
1556
}
1557

    
1558
/* update the TLB corresponding to virtual page vaddr and phys addr
1559
   addr so that it is no longer dirty */
1560
static inline void tlb_set_dirty(CPUState *env,
1561
                                 unsigned long addr, target_ulong vaddr)
1562
{
1563
    int i;
1564

    
1565
    addr &= TARGET_PAGE_MASK;
1566
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1567
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1568
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1569
#if (NB_MMU_MODES >= 3)
1570
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1571
#if (NB_MMU_MODES == 4)
1572
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1573
#endif
1574
#endif
1575
}
1576

    
1577
/* add a new TLB entry. At most one entry for a given virtual address
1578
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1579
   (can only happen in non SOFTMMU mode for I/O pages or pages
1580
   conflicting with the host address space). */
1581
int tlb_set_page_exec(CPUState *env, target_ulong vaddr, 
1582
                      target_phys_addr_t paddr, int prot, 
1583
                      int is_user, int is_softmmu)
1584
{
1585
    PhysPageDesc *p;
1586
    unsigned long pd;
1587
    unsigned int index;
1588
    target_ulong address;
1589
    target_phys_addr_t addend;
1590
    int ret;
1591
    CPUTLBEntry *te;
1592
    int i;
1593

    
1594
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1595
    if (!p) {
1596
        pd = IO_MEM_UNASSIGNED;
1597
    } else {
1598
        pd = p->phys_offset;
1599
    }
1600
#if defined(DEBUG_TLB)
1601
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
1602
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
1603
#endif
1604

    
1605
    ret = 0;
1606
#if !defined(CONFIG_SOFTMMU)
1607
    if (is_softmmu) 
1608
#endif
1609
    {
1610
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1611
            /* IO memory case */
1612
            address = vaddr | pd;
1613
            addend = paddr;
1614
        } else {
1615
            /* standard memory */
1616
            address = vaddr;
1617
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1618
        }
1619

    
        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].is_ram = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].is_ram = 1;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

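/* Sub-page support: when several memory regions with different handlers
   share a single target page, the page is switched to a subpage_t
   descriptor which dispatches each access to the handler registered for
   the corresponding offset inside the page. */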
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if (end_addr - addr > TARGET_PAGE_SIZE)                         \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    end_addr = start_addr + (target_phys_addr_t)size;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    for(addr = start_addr; addr < end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

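/* Illustrative usage sketch (hypothetical board code, not taken from an
   actual machine model): RAM is allocated from the physical RAM pool and
   then mapped, while a ROM region is mapped with the IO_MEM_ROM flag:

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
       cpu_register_physical_memory(rom_base, rom_size,
                                    rom_offset | IO_MEM_ROM);

   'ram_size', 'rom_base', 'rom_size' and 'rom_offset' are placeholders. */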
/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

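/* Slow-path write handlers used for RAM pages whose TLB entries carry
   IO_MEM_NOTDIRTY: they invalidate any translated code on the page,
   perform the store, update the dirty flags and, once the page is fully
   dirty, restore the fast path by calling tlb_set_dirty(). */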
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in the is_ram case.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            if (env->watchpoint[i].is_ram)
                retaddr = addr - (unsigned long)phys_ram_base;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

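/* Sub-page dispatch helpers: select the handler registered for the offset
   of the access inside the page and forward the byte/word/long access
   to it. */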
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

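/* Fill the sub-page handler tables for offsets [start, end] within the
   page with the read/write callbacks and opaque pointer of the given
   memory type. */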
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

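/* Register the built-in memory handlers (ROM, unassigned, not-dirty and,
   with CONFIG_SOFTMMU, the watchpoint handlers) and allocate the dirty
   bitmap for physical RAM. */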
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

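/* Illustrative usage sketch (hypothetical device model, not taken from an
   actual one): register three read and three write callbacks, then map
   the returned handle like any other physical memory region:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int iomem = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, TARGET_PAGE_SIZE, iomem);

   the mydev_* functions, 's' and 'base' are placeholders. */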
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
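/* System emulation version: the buffer is copied page by page, going
   through the registered I/O handlers for device memory and straight to
   host RAM (with code invalidation and dirty-bit update on writes) for
   RAM pages. */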
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

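/* dump statistics about the translated code (TB sizes, cross-page TBs,
   direct jump counts) and the TB/TLB flush counters */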
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

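/* Instantiate the code-access softmmu helpers (MMUSUFFIX _cmmu), used when
   the translator itself fetches guest code, for 1, 2, 4 and 8 byte
   accesses. */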
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif