/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
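/* e.g. with 4 KiB target pages (TARGET_PAGE_BITS == 12) and L2_BITS == 10,
   a 32-bit address splits into a 10-bit L1 index, a 10-bit L2 index and a
   12-bit page offset: 1024 L1 slots, each pointing to 1024 descriptors */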

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}
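
/* return the PhysPageDesc for physical page 'index', walking the one or
   two level l1_phys_map; when 'alloc' is non zero, missing intermediate
   tables are allocated and fresh entries start out as IO_MEM_UNASSIGNED */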
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
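
/* unlink a TB from every structure that references it: the physical hash
   table, the per-page TB lists, each CPU's tb_jmp_cache and the jump
   chaining lists. 'page_addr' is the page list the caller is already
   iterating (or -1), so that one is left for the caller to update. */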
static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
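
/* set bits [start, start + len) in the bitmap 'tab'; e.g. set_bits(tab, 3, 7)
   sets bits 3..7 of tab[0] and bits 0..1 of tab[1] */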
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
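/* i.e. the returned TB is the one whose generated code contains the host
   address tc_ptr; tb_invalidate_phys_page_range() uses it with
   env->mem_write_pc to find the block a faulting store came from */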
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
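/* invalidate the translated code containing 'pc' so that the address is
   retranslated and the inserted or removed breakpoint is honoured */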
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}
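
/* note: unchaining the current TB is enough: once its direct jumps are
   reset, execution falls back to the main loop in cpu_exec() at the next
   block boundary, where interrupt_request is examined */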

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
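
/* e.g. cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, and "all" selects every entry of
   cpu_log_items */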

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    if (logfile) {
        fflush(logfile);
        fclose(logfile);
    }
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}
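
/* if this TLB entry maps RAM inside [start, start + length), retag its
   write address as IO_MEM_NOTDIRTY so that the next store takes the slow
   path and can update the dirty bitmap */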
1438

    
1439
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1440
                                         unsigned long start, unsigned long length)
1441
{
1442
    unsigned long addr;
1443
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1444
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1445
        if ((addr - start) < length) {
1446
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1447
        }
1448
    }
1449
}
1450

    
1451
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1452
                                     int dirty_flags)
1453
{
1454
    CPUState *env;
1455
    unsigned long length, start1;
1456
    int i, mask, len;
1457
    uint8_t *p;
1458

    
1459
    start &= TARGET_PAGE_MASK;
1460
    end = TARGET_PAGE_ALIGN(end);
1461

    
1462
    length = end - start;
1463
    if (length == 0)
1464
        return;
1465
    len = length >> TARGET_PAGE_BITS;
1466
#ifdef USE_KQEMU
1467
    /* XXX: should not depend on cpu context */
1468
    env = first_cpu;
1469
    if (env->kqemu_enabled) {
1470
        ram_addr_t addr;
1471
        addr = start;
1472
        for(i = 0; i < len; i++) {
1473
            kqemu_set_notdirty(env, addr);
1474
            addr += TARGET_PAGE_SIZE;
1475
        }
1476
    }
1477
#endif
1478
    mask = ~dirty_flags;
1479
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1480
    for(i = 0; i < len; i++)
1481
        p[i] &= mask;
1482

    
1483
    /* we modify the TLB cache so that the dirty bit will be set again
1484
       when accessing the range */
1485
    start1 = start + (unsigned long)phys_ram_base;
1486
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1487
        for(i = 0; i < CPU_TLB_SIZE; i++)
1488
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1489
        for(i = 0; i < CPU_TLB_SIZE; i++)
1490
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1491
#if (NB_MMU_MODES >= 3)
1492
        for(i = 0; i < CPU_TLB_SIZE; i++)
1493
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1494
#if (NB_MMU_MODES == 4)
1495
        for(i = 0; i < CPU_TLB_SIZE; i++)
1496
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1497
#endif
1498
#endif
1499
    }
1500

    
1501
#if !defined(CONFIG_SOFTMMU)
1502
    /* XXX: this is expensive */
1503
    {
1504
        VirtPageDesc *p;
1505
        int j;
1506
        target_ulong addr;
1507

    
1508
        for(i = 0; i < L1_SIZE; i++) {
1509
            p = l1_virt_map[i];
1510
            if (p) {
1511
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1512
                for(j = 0; j < L2_SIZE; j++) {
1513
                    if (p->valid_tag == virt_valid_tag &&
1514
                        p->phys_addr >= start && p->phys_addr < end &&
1515
                        (p->prot & PROT_WRITE)) {
1516
                        if (addr < MMAP_AREA_END) {
1517
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
1518
                                     p->prot & ~PROT_WRITE);
1519
                        }
1520
                    }
1521
                    addr += TARGET_PAGE_SIZE;
1522
                    p++;
1523
                }
1524
            }
1525
        }
1526
    }
1527
#endif
1528
}
1529

    
1530
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1531
{
1532
    ram_addr_t ram_addr;
1533

    
1534
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1535
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1536
            tlb_entry->addend - (unsigned long)phys_ram_base;
1537
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1538
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1539
        }
1540
    }
1541
}
1542

    
1543
/* update the TLB according to the current state of the dirty bits */
1544
void cpu_tlb_update_dirty(CPUState *env)
1545
{
1546
    int i;
1547
    for(i = 0; i < CPU_TLB_SIZE; i++)
1548
        tlb_update_dirty(&env->tlb_table[0][i]);
1549
    for(i = 0; i < CPU_TLB_SIZE; i++)
1550
        tlb_update_dirty(&env->tlb_table[1][i]);
1551
#if (NB_MMU_MODES >= 3)
1552
    for(i = 0; i < CPU_TLB_SIZE; i++)
1553
        tlb_update_dirty(&env->tlb_table[2][i]);
1554
#if (NB_MMU_MODES == 4)
1555
    for(i = 0; i < CPU_TLB_SIZE; i++)
1556
        tlb_update_dirty(&env->tlb_table[3][i]);
1557
#endif
1558
#endif
1559
}
1560

    
1561
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1562
                                  unsigned long start)
1563
{
1564
    unsigned long addr;
1565
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1566
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1567
        if (addr == start) {
1568
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1569
        }
1570
    }
1571
}
1572

    
1573
/* update the TLB corresponding to virtual page vaddr and phys addr
1574
   addr so that it is no longer dirty */
1575
static inline void tlb_set_dirty(CPUState *env,
1576
                                 unsigned long addr, target_ulong vaddr)
1577
{
1578
    int i;
1579

    
1580
    addr &= TARGET_PAGE_MASK;
1581
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1582
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1583
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1584
#if (NB_MMU_MODES >= 3)
1585
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1586
#if (NB_MMU_MODES == 4)
1587
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1588
#endif
1589
#endif
1590
}
1591

    
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[is_user][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code was inside */
                        /* if code is present, we only map as read-only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}
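
/* Editor's note on tlb_set_page_exec() above: for ordinary RAM the TLB
   entry stores addend = host_ram_ptr - vaddr, so the fast path turns a
   guest virtual address into a host address with a single add.  For I/O
   pages the io index is kept in the low bits of addr_read/addr_write
   instead, which forces the slow path.  Illustrative numbers (not taken
   from the source): with phys_ram_base = 0x40000000, pd = 0x2000 and
   vaddr = 0x8000, addend = 0x40002000 - 0x8000 = 0x3fffa000, and a guest
   access to 0x8010 resolves to host address 0x40002010. */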

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}
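
/* Editor's note on the non-SOFTMMU page_unprotect() above: pages holding
   translated code are mapped read-only, so a guest write faults.  The
   handler restores the original protection with mprotect(), marks the
   physical page dirty and invalidates the translated blocks generated
   from it, after which the faulting write can be restarted. */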

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the page was write-protected and is now being made
           writable, invalidate any code inside it */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}
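
/* Editor's note: PAGE_WRITE_ORG, set above, records that the guest mapped
   the page writable.  When translated code is generated from a page,
   PAGE_WRITE is cleared so that guest writes fault; page_unprotect() below
   then uses PAGE_WRITE_ORG to decide whether write access may legitimately
   be restored once the translated blocks have been invalidated. */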

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}
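
/* Editor's note: host mprotect() works at qemu_host_page_size granularity,
   which may be larger than TARGET_PAGE_SIZE.  That is why the loop above
   ORs together the flags of every target page inside the host page before
   changing the protection of the whole host page, while only the faulting
   target page gets PAGE_WRITE set back in its flags. */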

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now that we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
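
/* Editor's note: CHECK_SUBPAGE computes, for the target page containing
   'addr', the first and last byte offsets (start_addr2/end_addr2) that the
   registered region actually covers, and sets need_subpage when the region
   does not span the whole page.  It also relies on 'orig_size' from the
   enclosing function.  Illustrative example (assuming a 4 KiB
   TARGET_PAGE_SIZE, not taken from the source): registering 0x200 bytes at
   start_addr = page_base + 0x100 yields start_addr2 = 0x100,
   end_addr2 = 0x2ff and need_subpage = 1. */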

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
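
/* Editor's note on cpu_register_physical_memory() above: for RAM and ROM
   pages (phys_offset <= IO_MEM_ROM, or IO_MEM_ROMD set) phys_offset encodes
   the page's offset inside the phys_ram_base block, so it is advanced by
   TARGET_PAGE_SIZE for each page; for MMIO every page keeps the same
   (io_index << IO_MEM_SHIFT) value.  Regions that do not cover a whole
   target page are redirected through the per-page dispatcher built by
   subpage_init()/subpage_register(). */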

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
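
/* Editor's note on the notdirty handlers above: writes to a clean RAM page
   are routed through IO_MEM_NOTDIRTY by tlb_set_page_exec().  Each handler
   first invalidates any translated code derived from the page, performs the
   store into host RAM, then sets every dirty bit except CODE_DIRTY_FLAG
   (which tb_invalidate_phys_page_fast sets once no translated code
   remains).  Only when the byte reaches 0xff does tlb_set_dirty() switch
   the TLB entry back to the fast, callback-free path. */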

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif
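
/* Editor's note: watchpoints reuse the MMIO machinery.  tlb_set_page_exec()
   maps a watched RAM page through io_mem_watch and stores an addend that
   lets check_watchpoint() convert the host address it receives back into
   the guest physical address.  When the access really falls on the watched
   address, watchpoint_hit is recorded and CPU_INTERRUPT_DEBUG is raised;
   in all cases the access is then completed through the normal
   ld*_phys/st*_phys helpers. */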

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
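
/* Editor's note: a subpage is a per-page dispatch table.  subpage_register()
   fills the SUBPAGE_IDX slots between 'start' and 'end' with the read/write
   callbacks and opaque pointer of the backing io memory, and subpage_init()
   registers the table itself as an io memory region tagged IO_MEM_SUBPAGE,
   so that cpu_register_physical_memory() can later splice further regions
   into the same target page. */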

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding I/O zone is modified. If it is zero, a new I/O zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}
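
/* Editor's note: minimal usage sketch (hypothetical device names, not from
   this file).  A device model supplies byte/word/long callback tables and
   then maps the returned token:

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, mydev_state);
       cpu_register_physical_memory(0x10000000, 0x1000, io);

   The return value already carries the io index shifted left by
   IO_MEM_SHIFT, which is why cpu_get_io_memory_read() and
   cpu_get_io_memory_write() shift it back down before indexing the tables. */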

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
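
/* Editor's note on cpu_physical_memory_rw() above: the buffer is processed
   one target page at a time.  MMIO pages are accessed through the io_mem
   callbacks using the widest naturally aligned width available (4, 2 or 1
   bytes); RAM pages are a plain memcpy() against phys_ram_base, followed on
   writes to clean pages by TB invalidation and a dirty-bit update. */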

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}


    
2683
/* XXX: optimize */
2684
uint32_t ldub_phys(target_phys_addr_t addr)
2685
{
2686
    uint8_t val;
2687
    cpu_physical_memory_read(addr, &val, 1);
2688
    return val;
2689
}
2690

    
2691
/* XXX: optimize */
2692
uint32_t lduw_phys(target_phys_addr_t addr)
2693
{
2694
    uint16_t val;
2695
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2696
    return tswap16(val);
2697
}
2698

    
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
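
/* Editor's note: stl_phys_notdirty()/stq_phys_notdirty() above deliberately
   skip the dirty-bit update and TB invalidation that stl_phys() below
   performs.  They are intended for stores the emulator itself makes while
   walking guest page tables (e.g. updating accessed/dirty bits in PTEs)
   when the phys_ram_dirty bits are themselves being used to track PTE
   modifications. */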

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif
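
/* Editor's note: softmmu_template.h is a width-generic template; each
   inclusion with SHIFT set to 0, 1, 2 or 3 expands the 1-, 2-, 4- and
   8-byte slow-path memory accessors.  With MMUSUFFIX defined as _cmmu and
   SOFTMMU_CODE_ACCESS set, this particular block generates the code-access
   variants used when fetching target instructions for translation. */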