/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;
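
/* page_init() determines the host page size and makes the statically
   allocated code generation buffer executable (VirtualProtect on
   win32, mprotect elsewhere) before any code is translated into it */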

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
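
/* the page table is a two level array: the top L1_BITS of a target
   page index select an l1_map entry, the low L2_BITS select a PageDesc
   inside the corresponding second level block, which is allocated
   lazily by page_find_alloc() */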

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif
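
/* TBs are linked through tagged pointers: the two low bits of a
   TranslationBlock pointer stored in page_next[] or in the jmp lists
   encode which slot (0 or 1) of the pointed-to TB the link belongs to,
   and the value 2 marks the end of the circular jmp_first list */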

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
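
/* the code bitmap has one bit per byte of the page: set_bits() marks
   the byte ranges covered by the TBs of a page, so that
   tb_invalidate_phys_page_fast() can skip writes which do not touch
   any translated code */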

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
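
/* precise self modifying code support: when a write lands in the TB
   that is currently executing, tb_gen_code() regenerates a block
   containing only the faulting instruction (CF_SINGLE_INSN) so that it
   cannot invalidate itself again */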

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}
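
/* tbs[] entries are allocated in increasing tc_ptr order, since the
   generated code is laid out sequentially in code_gen_buffer; this is
   what makes the binary search below valid */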

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
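
/* force retranslation of the code that contains a breakpoint: the TB
   covering the physical page of 'pc' is invalidated below, so it is
   regenerated the next time it is executed */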

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
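
/* cpu_interrupt() unchains the jumps of the currently executing TB
   (tb_reset_jump_recursive) so that control returns to the main
   execution loop, where interrupt_request is examined */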

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
                for(item = cpu_log_items; item->mask != 0; item++) {
                        mask |= item->mask;
                }
        } else {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
#if 0
    /* XXX: broken, must be handled by each CPU */
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
#else
    return NULL;
#endif
}

#if !defined(CONFIG_USER_ONLY)
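
/* software TLB handling: an entry is invalidated by setting its
   addr_read/addr_write/addr_code fields to -1, which can never match a
   page aligned virtual address in the fast path comparison */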

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
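
/* dirty memory tracking: phys_ram_dirty keeps one byte of dirty flags
   per target page. cpu_physical_memory_reset_dirty() clears the
   requested flags and downgrades the matching TLB write entries to
   IO_MEM_NOTDIRTY, so the next write takes the slow path and sets the
   dirty flags again */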
1463

    
1464
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1465
                                     int dirty_flags)
1466
{
1467
    CPUState *env;
1468
    unsigned long length, start1;
1469
    int i, mask, len;
1470
    uint8_t *p;
1471

    
1472
    start &= TARGET_PAGE_MASK;
1473
    end = TARGET_PAGE_ALIGN(end);
1474

    
1475
    length = end - start;
1476
    if (length == 0)
1477
        return;
1478
    len = length >> TARGET_PAGE_BITS;
1479
#ifdef USE_KQEMU
1480
    /* XXX: should not depend on cpu context */
1481
    env = first_cpu;
1482
    if (env->kqemu_enabled) {
1483
        ram_addr_t addr;
1484
        addr = start;
1485
        for(i = 0; i < len; i++) {
1486
            kqemu_set_notdirty(env, addr);
1487
            addr += TARGET_PAGE_SIZE;
1488
        }
1489
    }
1490
#endif
1491
    mask = ~dirty_flags;
1492
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1493
    for(i = 0; i < len; i++)
1494
        p[i] &= mask;
1495

    
1496
    /* we modify the TLB cache so that the dirty bit will be set again
1497
       when accessing the range */
1498
    start1 = start + (unsigned long)phys_ram_base;
1499
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1500
        for(i = 0; i < CPU_TLB_SIZE; i++)
1501
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1502
        for(i = 0; i < CPU_TLB_SIZE; i++)
1503
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1504
#if (NB_MMU_MODES >= 3)
1505
        for(i = 0; i < CPU_TLB_SIZE; i++)
1506
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1507
#if (NB_MMU_MODES == 4)
1508
        for(i = 0; i < CPU_TLB_SIZE; i++)
1509
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1510
#endif
1511
#endif
1512
    }
1513

    
1514
#if !defined(CONFIG_SOFTMMU)
1515
    /* XXX: this is expensive */
1516
    {
1517
        VirtPageDesc *p;
1518
        int j;
1519
        target_ulong addr;
1520

    
1521
        for(i = 0; i < L1_SIZE; i++) {
1522
            p = l1_virt_map[i];
1523
            if (p) {
1524
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
1525
                for(j = 0; j < L2_SIZE; j++) {
1526
                    if (p->valid_tag == virt_valid_tag &&
1527
                        p->phys_addr >= start && p->phys_addr < end &&
1528
                        (p->prot & PROT_WRITE)) {
1529
                        if (addr < MMAP_AREA_END) {
1530
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
1531
                                     p->prot & ~PROT_WRITE);
1532
                        }
1533
                    }
1534
                    addr += TARGET_PAGE_SIZE;
1535
                    p++;
1536
                }
1537
            }
1538
        }
1539
    }
1540
#endif
1541
}
1542

    
1543
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1544
{
1545
    ram_addr_t ram_addr;
1546

    
1547
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1548
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1549
            tlb_entry->addend - (unsigned long)phys_ram_base;
1550
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1551
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1552
        }
1553
    }
1554
}
1555

    
1556
/* update the TLB according to the current state of the dirty bits */
1557
void cpu_tlb_update_dirty(CPUState *env)
1558
{
1559
    int i;
1560
    for(i = 0; i < CPU_TLB_SIZE; i++)
1561
        tlb_update_dirty(&env->tlb_table[0][i]);
1562
    for(i = 0; i < CPU_TLB_SIZE; i++)
1563
        tlb_update_dirty(&env->tlb_table[1][i]);
1564
#if (NB_MMU_MODES >= 3)
1565
    for(i = 0; i < CPU_TLB_SIZE; i++)
1566
        tlb_update_dirty(&env->tlb_table[2][i]);
1567
#if (NB_MMU_MODES == 4)
1568
    for(i = 0; i < CPU_TLB_SIZE; i++)
1569
        tlb_update_dirty(&env->tlb_table[3][i]);
1570
#endif
1571
#endif
1572
}
1573

    
1574
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1575
                                  unsigned long start)
1576
{
1577
    unsigned long addr;
1578
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1579
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1580
        if (addr == start) {
1581
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1582
        }
1583
    }
1584
}
1585

    
1586
/* update the TLB corresponding to virtual page vaddr and phys addr
1587
   addr so that it is no longer dirty */
1588
static inline void tlb_set_dirty(CPUState *env,
1589
                                 unsigned long addr, target_ulong vaddr)
1590
{
1591
    int i;
1592

    
1593
    addr &= TARGET_PAGE_MASK;
1594
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1595
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1596
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1597
#if (NB_MMU_MODES >= 3)
1598
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1599
#if (NB_MMU_MODES == 4)
1600
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1601
#endif
1602
#endif
1603
}
1604

    
1605
/* add a new TLB entry. At most one entry for a given virtual address
1606
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1607
   (can only happen in non SOFTMMU mode for I/O pages or pages
1608
   conflicting with the host address space). */
1609
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1610
                      target_phys_addr_t paddr, int prot,
1611
                      int mmu_idx, int is_softmmu)
1612
{
1613
    PhysPageDesc *p;
1614
    unsigned long pd;
1615
    unsigned int index;
1616
    target_ulong address;
1617
    target_phys_addr_t addend;
1618
    int ret;
1619
    CPUTLBEntry *te;
1620
    int i;
1621

    
1622
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1623
    if (!p) {
1624
        pd = IO_MEM_UNASSIGNED;
1625
    } else {
1626
        pd = p->phys_offset;
1627
    }
1628
#if defined(DEBUG_TLB)
1629
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1630
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1631
#endif
1632

    
1633
    ret = 0;
1634
#if !defined(CONFIG_SOFTMMU)
1635
    if (is_softmmu)
1636
#endif
1637
    {
1638
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1639
            /* IO memory case */
1640
            address = vaddr | pd;
1641
            addend = paddr;
1642
        } else {
1643
            /* standard memory */
1644
            address = vaddr;
1645
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1646
        }
1647

    
1648
        /* Make accesses to pages with watchpoints go via the
1649
           watchpoint trap routines.  */
1650
        for (i = 0; i < env->nb_watchpoints; i++) {
1651
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1652
                if (address & ~TARGET_PAGE_MASK) {
1653
                    env->watchpoint[i].addend = 0;
1654
                    address = vaddr | io_mem_watch;
1655
                } else {
1656
                    env->watchpoint[i].addend = pd - paddr +
1657
                        (unsigned long) phys_ram_base;
1658
                    /* TODO: Figure out how to make read watchpoints coexist
1659
                       with code.  */
1660
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
1661
                }
1662
            }
1663
        }
1664

    
1665
        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1666
        addend -= vaddr;
1667
        te = &env->tlb_table[mmu_idx][index];
1668
        te->addend = addend;
1669
        if (prot & PAGE_READ) {
1670
            te->addr_read = address;
1671
        } else {
1672
            te->addr_read = -1;
1673
        }
1674
        if (prot & PAGE_EXEC) {
1675
            te->addr_code = address;
1676
        } else {
1677
            te->addr_code = -1;
1678
        }
1679
        if (prot & PAGE_WRITE) {
1680
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1681
                (pd & IO_MEM_ROMD)) {
1682
                /* write access calls the I/O callback */
1683
                te->addr_write = vaddr |
1684
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
1685
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1686
                       !cpu_physical_memory_is_dirty(pd)) {
1687
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
1688
            } else {
1689
                te->addr_write = address;
1690
            }
1691
        } else {
1692
            te->addr_write = -1;
1693
        }
1694
    }
1695
#if !defined(CONFIG_SOFTMMU)
1696
    else {
1697
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
1698
            /* IO access: no mapping is done as it will be handled by the
1699
               soft MMU */
1700
            if (!(env->hflags & HF_SOFTMMU_MASK))
1701
                ret = 2;
1702
        } else {
1703
            void *map_addr;
1704

    
1705
            if (vaddr >= MMAP_AREA_END) {
1706
                ret = 2;
1707
            } else {
1708
                if (prot & PROT_WRITE) {
1709
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1710
#if defined(TARGET_HAS_SMC) || 1
1711
                        first_tb ||
1712
#endif
1713
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1714
                         !cpu_physical_memory_is_dirty(pd))) {
1715
                        /* ROM: we behave as if code was inside */
                        /* if code is present, we only map the page read-only and save
                           the original mapping */
1718
                        VirtPageDesc *vp;
1719

    
1720
                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
1721
                        vp->phys_addr = pd;
1722
                        vp->prot = prot;
1723
                        vp->valid_tag = virt_valid_tag;
1724
                        prot &= ~PAGE_WRITE;
1725
                    }
1726
                }
1727
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
1728
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1729
                if (map_addr == MAP_FAILED) {
1730
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
1732
                }
1733
            }
1734
        }
1735
    }
1736
#endif
1737
    return ret;
1738
}
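
/* For illustration: a typical caller is a target's MMU fault handler,
   which installs a mapping once it has resolved the guest virtual
   address.  The variable names below are hypothetical, not taken from
   this file:

       target_ulong page_vaddr = address & TARGET_PAGE_MASK;
       target_phys_addr_t page_paddr = pte_paddr & TARGET_PAGE_MASK;
       int page_prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

       ret = tlb_set_page_exec(env, page_vaddr, page_paddr,
                               page_prot, mmu_idx, is_softmmu);
*/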
1739

    
1740
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1742
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
1743
{
1744
#if !defined(CONFIG_SOFTMMU)
1745
    VirtPageDesc *vp;
1746

    
1747
#if defined(DEBUG_TLB)
1748
    printf("page_unprotect: addr=0x%08x\n", addr);
1749
#endif
1750
    addr &= TARGET_PAGE_MASK;
1751

    
1752
    /* if it is not mapped, no need to worry here */
1753
    if (addr >= MMAP_AREA_END)
1754
        return 0;
1755
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
1756
    if (!vp)
1757
        return 0;
1758
    /* NOTE: in this case, validate_tag is _not_ tested as it
1759
       validates only the code TLB */
1760
    if (vp->valid_tag != virt_valid_tag)
1761
        return 0;
1762
    if (!(vp->prot & PAGE_WRITE))
1763
        return 0;
1764
#if defined(DEBUG_TLB)
1765
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
1766
           addr, vp->phys_addr, vp->prot);
1767
#endif
1768
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
1769
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
1770
                  (unsigned long)addr, vp->prot);
1771
    /* set the dirty bit */
1772
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
1773
    /* flush the code inside */
1774
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
1775
    return 1;
1776
#else
1777
    return 0;
1778
#endif
1779
}
1780

    
1781
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}
1797

    
1798
/* dump memory mappings */
1799
void page_dump(FILE *f)
1800
{
1801
    unsigned long start, end;
1802
    int i, j, prot, prot1;
1803
    PageDesc *p;
1804

    
1805
    fprintf(f, "%-8s %-8s %-8s %s\n",
1806
            "start", "end", "size", "prot");
1807
    start = -1;
1808
    end = -1;
1809
    prot = 0;
1810
    for(i = 0; i <= L1_SIZE; i++) {
1811
        if (i < L1_SIZE)
1812
            p = l1_map[i];
1813
        else
1814
            p = NULL;
1815
        for(j = 0;j < L2_SIZE; j++) {
1816
            if (!p)
1817
                prot1 = 0;
1818
            else
1819
                prot1 = p[j].flags;
1820
            if (prot1 != prot) {
1821
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
1822
                if (start != -1) {
1823
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
1824
                            start, end, end - start,
1825
                            prot & PAGE_READ ? 'r' : '-',
1826
                            prot & PAGE_WRITE ? 'w' : '-',
1827
                            prot & PAGE_EXEC ? 'x' : '-');
1828
                }
1829
                if (prot1 != 0)
1830
                    start = end;
1831
                else
1832
                    start = -1;
1833
                prot = prot1;
1834
            }
1835
            if (!p)
1836
                break;
1837
        }
1838
    }
1839
}
1840

    
1841
int page_get_flags(target_ulong address)
1842
{
1843
    PageDesc *p;
1844

    
1845
    p = page_find(address >> TARGET_PAGE_BITS);
1846
    if (!p)
1847
        return 0;
1848
    return p->flags;
1849
}
1850

    
1851
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
1854
void page_set_flags(target_ulong start, target_ulong end, int flags)
1855
{
1856
    PageDesc *p;
1857
    target_ulong addr;
1858

    
1859
    start = start & TARGET_PAGE_MASK;
1860
    end = TARGET_PAGE_ALIGN(end);
1861
    if (flags & PAGE_WRITE)
1862
        flags |= PAGE_WRITE_ORG;
1863
    spin_lock(&tb_lock);
1864
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1865
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
1866
        /* if the write protection is set, then we invalidate the code
1867
           inside */
1868
        if (!(p->flags & PAGE_WRITE) &&
1869
            (flags & PAGE_WRITE) &&
1870
            p->first_tb) {
1871
            tb_invalidate_phys_page(addr, 0, NULL);
1872
        }
1873
        p->flags = flags;
1874
    }
1875
    spin_unlock(&tb_lock);
1876
}
1877

    
1878
int page_check_range(target_ulong start, target_ulong len, int flags)
1879
{
1880
    PageDesc *p;
1881
    target_ulong end;
1882
    target_ulong addr;
1883

    
1884
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
1885
    start = start & TARGET_PAGE_MASK;
1886

    
1887
    if( end < start )
1888
        /* we've wrapped around */
1889
        return -1;
1890
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1891
        p = page_find(addr >> TARGET_PAGE_BITS);
1892
        if( !p )
1893
            return -1;
1894
        if( !(p->flags & PAGE_VALID) )
1895
            return -1;
1896

    
1897
        if (!(p->flags & PAGE_READ) && (flags & PAGE_READ) )
1898
            return -1;
1899
        if (!(p->flags & PAGE_WRITE) && (flags & PAGE_WRITE) )
1900
            return -1;
1901
    }
1902
    return 0;
1903
}
1904

    
1905
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
1907
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
1908
{
1909
    unsigned int page_index, prot, pindex;
1910
    PageDesc *p, *p1;
1911
    target_ulong host_start, host_end, addr;
1912

    
1913
    host_start = address & qemu_host_page_mask;
1914
    page_index = host_start >> TARGET_PAGE_BITS;
1915
    p1 = page_find(page_index);
1916
    if (!p1)
1917
        return 0;
1918
    host_end = host_start + qemu_host_page_size;
1919
    p = p1;
1920
    prot = 0;
1921
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
1922
        prot |= p->flags;
1923
        p++;
1924
    }
1925
    /* if the page was really writable, then we change its
1926
       protection back to writable */
1927
    if (prot & PAGE_WRITE_ORG) {
1928
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
1929
        if (!(p1[pindex].flags & PAGE_WRITE)) {
1930
            mprotect((void *)g2h(host_start), qemu_host_page_size,
1931
                     (prot & PAGE_BITS) | PAGE_WRITE);
1932
            p1[pindex].flags |= PAGE_WRITE;
1933
            /* and since the content will be modified, we must invalidate
1934
               the corresponding translated code. */
1935
            tb_invalidate_phys_page(address, pc, puc);
1936
#ifdef DEBUG_TB_CHECK
1937
            tb_invalidate_check(address);
1938
#endif
1939
            return 1;
1940
        }
1941
    }
1942
    return 0;
1943
}
1944

    
1945
/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now that we have lock_user.  */
1947
void page_unprotect_range(target_ulong data, target_ulong data_size)
1948
{
1949
    target_ulong start, end, addr;
1950

    
1951
    start = data;
1952
    end = start + data_size;
1953
    start &= TARGET_PAGE_MASK;
1954
    end = TARGET_PAGE_ALIGN(end);
1955
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
1956
        page_unprotect(addr, 0, NULL);
1957
    }
1958
}
1959

    
1960
static inline void tlb_set_dirty(CPUState *env,
1961
                                 unsigned long addr, target_ulong vaddr)
1962
{
1963
}
1964
#endif /* defined(CONFIG_USER_ONLY) */
1965

    
1966
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
1967
                             int memory);
1968
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
1969
                           int orig_memory);
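
/* CHECK_SUBPAGE computes, for the target page containing 'addr', the
   range of in-page offsets [start_addr2, end_addr2] that is covered by
   the region being registered, and sets need_subpage when that range
   does not cover the whole page (i.e. per-subpage dispatch is needed). */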
1970
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
1971
                      need_subpage)                                     \
1972
    do {                                                                \
1973
        if (addr > start_addr)                                          \
1974
            start_addr2 = 0;                                            \
1975
        else {                                                          \
1976
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
1977
            if (start_addr2 > 0)                                        \
1978
                need_subpage = 1;                                       \
1979
        }                                                               \
1980
                                                                        \
1981
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
1982
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
1983
        else {                                                          \
1984
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
1985
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
1986
                need_subpage = 1;                                       \
1987
        }                                                               \
1988
    } while (0)
1989

    
1990
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   I/O memory page */
1993
void cpu_register_physical_memory(target_phys_addr_t start_addr,
1994
                                  unsigned long size,
1995
                                  unsigned long phys_offset)
1996
{
1997
    target_phys_addr_t addr, end_addr;
1998
    PhysPageDesc *p;
1999
    CPUState *env;
2000
    unsigned long orig_size = size;
2001
    void *subpage;
2002

    
2003
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2004
    end_addr = start_addr + (target_phys_addr_t)size;
2005
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2006
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2007
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2008
            unsigned long orig_memory = p->phys_offset;
2009
            target_phys_addr_t start_addr2, end_addr2;
2010
            int need_subpage = 0;
2011

    
2012
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2013
                          need_subpage);
2014
            if (need_subpage) {
2015
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2016
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2017
                                           &p->phys_offset, orig_memory);
2018
                } else {
2019
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2020
                                            >> IO_MEM_SHIFT];
2021
                }
2022
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2023
            } else {
2024
                p->phys_offset = phys_offset;
2025
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2026
                    (phys_offset & IO_MEM_ROMD))
2027
                    phys_offset += TARGET_PAGE_SIZE;
2028
            }
2029
        } else {
2030
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2031
            p->phys_offset = phys_offset;
2032
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2033
                (phys_offset & IO_MEM_ROMD))
2034
                phys_offset += TARGET_PAGE_SIZE;
2035
            else {
2036
                target_phys_addr_t start_addr2, end_addr2;
2037
                int need_subpage = 0;
2038

    
2039
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2040
                              end_addr2, need_subpage);
2041

    
2042
                if (need_subpage) {
2043
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2044
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2045
                    subpage_register(subpage, start_addr2, end_addr2,
2046
                                     phys_offset);
2047
                }
2048
            }
2049
        }
2050
    }
2051

    
2052
    /* since each CPU stores ram addresses in its TLB cache, we must
2053
       reset the modified entries */
2054
    /* XXX: slow ! */
2055
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2056
        tlb_flush(env, 1);
2057
    }
2058
}
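
/* For illustration, the usual board-level pattern is to allocate guest
   RAM with qemu_ram_alloc() below and then map it here; addresses and
   sizes are hypothetical:

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
*/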
2059

    
2060
/* XXX: temporary until new memory mapping API */
2061
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2062
{
2063
    PhysPageDesc *p;
2064

    
2065
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2066
    if (!p)
2067
        return IO_MEM_UNASSIGNED;
2068
    return p->phys_offset;
2069
}
2070

    
2071
/* XXX: better than nothing */
2072
ram_addr_t qemu_ram_alloc(unsigned int size)
2073
{
2074
    ram_addr_t addr;
2075
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
2076
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
2077
                size, phys_ram_size);
2078
        abort();
2079
    }
2080
    addr = phys_ram_alloc_offset;
2081
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2082
    return addr;
2083
}
2084

    
2085
void qemu_ram_free(ram_addr_t addr)
2086
{
2087
}
2088

    
2089
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2090
{
2091
#ifdef DEBUG_UNASSIGNED
2092
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2093
#endif
2094
#ifdef TARGET_SPARC
2095
    do_unassigned_access(addr, 0, 0, 0);
2096
#elif TARGET_CRIS
2097
    do_unassigned_access(addr, 0, 0, 0);
2098
#endif
2099
    return 0;
2100
}
2101

    
2102
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2103
{
2104
#ifdef DEBUG_UNASSIGNED
2105
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2106
#endif
2107
#ifdef TARGET_SPARC
2108
    do_unassigned_access(addr, 1, 0, 0);
2109
#elif TARGET_CRIS
2110
    do_unassigned_access(addr, 1, 0, 0);
2111
#endif
2112
}
2113

    
2114
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2115
    unassigned_mem_readb,
2116
    unassigned_mem_readb,
2117
    unassigned_mem_readb,
2118
};
2119

    
2120
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2121
    unassigned_mem_writeb,
2122
    unassigned_mem_writeb,
2123
    unassigned_mem_writeb,
2124
};
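
/* The notdirty handlers below back the IO_MEM_NOTDIRTY TLB entries that
   tlb_set_page_exec() installs for RAM pages whose dirty bits are still
   clear (typically pages containing translated code).  A write first
   invalidates any translated blocks on the page, then performs the
   store and sets the dirty flags; once the page is fully dirty the
   fast RAM mapping is restored via tlb_set_dirty(). */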
2125

    
2126
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2127
{
2128
    unsigned long ram_addr;
2129
    int dirty_flags;
2130
    ram_addr = addr - (unsigned long)phys_ram_base;
2131
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2132
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2133
#if !defined(CONFIG_USER_ONLY)
2134
        tb_invalidate_phys_page_fast(ram_addr, 1);
2135
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2136
#endif
2137
    }
2138
    stb_p((uint8_t *)(long)addr, val);
2139
#ifdef USE_KQEMU
2140
    if (cpu_single_env->kqemu_enabled &&
2141
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2142
        kqemu_modify_page(cpu_single_env, ram_addr);
2143
#endif
2144
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2145
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2146
    /* we remove the notdirty callback only if the code has been
2147
       flushed */
2148
    if (dirty_flags == 0xff)
2149
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2150
}
2151

    
2152
static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2153
{
2154
    unsigned long ram_addr;
2155
    int dirty_flags;
2156
    ram_addr = addr - (unsigned long)phys_ram_base;
2157
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2158
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2159
#if !defined(CONFIG_USER_ONLY)
2160
        tb_invalidate_phys_page_fast(ram_addr, 2);
2161
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2162
#endif
2163
    }
2164
    stw_p((uint8_t *)(long)addr, val);
2165
#ifdef USE_KQEMU
2166
    if (cpu_single_env->kqemu_enabled &&
2167
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2168
        kqemu_modify_page(cpu_single_env, ram_addr);
2169
#endif
2170
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2171
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2172
    /* we remove the notdirty callback only if the code has been
2173
       flushed */
2174
    if (dirty_flags == 0xff)
2175
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2176
}
2177

    
2178
static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2179
{
2180
    unsigned long ram_addr;
2181
    int dirty_flags;
2182
    ram_addr = addr - (unsigned long)phys_ram_base;
2183
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2184
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2185
#if !defined(CONFIG_USER_ONLY)
2186
        tb_invalidate_phys_page_fast(ram_addr, 4);
2187
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2188
#endif
2189
    }
2190
    stl_p((uint8_t *)(long)addr, val);
2191
#ifdef USE_KQEMU
2192
    if (cpu_single_env->kqemu_enabled &&
2193
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2194
        kqemu_modify_page(cpu_single_env, ram_addr);
2195
#endif
2196
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2197
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2198
    /* we remove the notdirty callback only if the code has been
2199
       flushed */
2200
    if (dirty_flags == 0xff)
2201
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
2202
}
2203

    
2204
static CPUReadMemoryFunc *error_mem_read[3] = {
2205
    NULL, /* never used */
2206
    NULL, /* never used */
2207
    NULL, /* never used */
2208
};
2209

    
2210
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2211
    notdirty_mem_writeb,
2212
    notdirty_mem_writew,
2213
    notdirty_mem_writel,
2214
};
2215

    
2216
#if defined(CONFIG_SOFTMMU)
2217
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2218
   so these check for a hit then pass through to the normal out-of-line
2219
   phys routines.  */
2220
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2221
{
2222
    return ldub_phys(addr);
2223
}
2224

    
2225
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2226
{
2227
    return lduw_phys(addr);
2228
}
2229

    
2230
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2231
{
2232
    return ldl_phys(addr);
2233
}
2234

    
2235
/* Generate a debug exception if a watchpoint has been hit.
2236
   Returns the real physical address of the access.  addr will be a host
2237
   address in case of a RAM location.  */
2238
static target_ulong check_watchpoint(target_phys_addr_t addr)
2239
{
2240
    CPUState *env = cpu_single_env;
2241
    target_ulong watch;
2242
    target_ulong retaddr;
2243
    int i;
2244

    
2245
    retaddr = addr;
2246
    for (i = 0; i < env->nb_watchpoints; i++) {
2247
        watch = env->watchpoint[i].vaddr;
2248
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
2249
            retaddr = addr - env->watchpoint[i].addend;
2250
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
2251
                cpu_single_env->watchpoint_hit = i + 1;
2252
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
2253
                break;
2254
            }
2255
        }
2256
    }
2257
    return retaddr;
2258
}
2259

    
2260
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2261
                             uint32_t val)
2262
{
2263
    addr = check_watchpoint(addr);
2264
    stb_phys(addr, val);
2265
}
2266

    
2267
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2268
                             uint32_t val)
2269
{
2270
    addr = check_watchpoint(addr);
2271
    stw_phys(addr, val);
2272
}
2273

    
2274
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2275
                             uint32_t val)
2276
{
2277
    addr = check_watchpoint(addr);
2278
    stl_phys(addr, val);
2279
}
2280

    
2281
static CPUReadMemoryFunc *watch_mem_read[3] = {
2282
    watch_mem_readb,
2283
    watch_mem_readw,
2284
    watch_mem_readl,
2285
};
2286

    
2287
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2288
    watch_mem_writeb,
2289
    watch_mem_writew,
2290
    watch_mem_writel,
2291
};
2292
#endif
2293

    
2294
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2295
                                 unsigned int len)
2296
{
2297
    CPUReadMemoryFunc **mem_read;
2298
    uint32_t ret;
2299
    unsigned int idx;
2300

    
2301
    idx = SUBPAGE_IDX(addr - mmio->base);
2302
#if defined(DEBUG_SUBPAGE)
2303
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2304
           mmio, len, addr, idx);
2305
#endif
2306
    mem_read = mmio->mem_read[idx];
2307
    ret = (*mem_read[len])(mmio->opaque[idx], addr);
2308

    
2309
    return ret;
2310
}
2311

    
2312
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2313
                              uint32_t value, unsigned int len)
2314
{
2315
    CPUWriteMemoryFunc **mem_write;
2316
    unsigned int idx;
2317

    
2318
    idx = SUBPAGE_IDX(addr - mmio->base);
2319
#if defined(DEBUG_SUBPAGE)
2320
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2321
           mmio, len, addr, idx, value);
2322
#endif
2323
    mem_write = mmio->mem_write[idx];
2324
    (*mem_write[len])(mmio->opaque[idx], addr, value);
2325
}
2326

    
2327
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2328
{
2329
#if defined(DEBUG_SUBPAGE)
2330
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2331
#endif
2332

    
2333
    return subpage_readlen(opaque, addr, 0);
2334
}
2335

    
2336
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2337
                            uint32_t value)
2338
{
2339
#if defined(DEBUG_SUBPAGE)
2340
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2341
#endif
2342
    subpage_writelen(opaque, addr, value, 0);
2343
}
2344

    
2345
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2346
{
2347
#if defined(DEBUG_SUBPAGE)
2348
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2349
#endif
2350

    
2351
    return subpage_readlen(opaque, addr, 1);
2352
}
2353

    
2354
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2355
                            uint32_t value)
2356
{
2357
#if defined(DEBUG_SUBPAGE)
2358
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2359
#endif
2360
    subpage_writelen(opaque, addr, value, 1);
2361
}
2362

    
2363
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2364
{
2365
#if defined(DEBUG_SUBPAGE)
2366
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2367
#endif
2368

    
2369
    return subpage_readlen(opaque, addr, 2);
2370
}
2371

    
2372
static void subpage_writel (void *opaque,
2373
                         target_phys_addr_t addr, uint32_t value)
2374
{
2375
#if defined(DEBUG_SUBPAGE)
2376
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2377
#endif
2378
    subpage_writelen(opaque, addr, value, 2);
2379
}
2380

    
2381
static CPUReadMemoryFunc *subpage_read[] = {
2382
    &subpage_readb,
2383
    &subpage_readw,
2384
    &subpage_readl,
2385
};
2386

    
2387
static CPUWriteMemoryFunc *subpage_write[] = {
2388
    &subpage_writeb,
2389
    &subpage_writew,
2390
    &subpage_writel,
2391
};
2392

    
2393
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2394
                             int memory)
2395
{
2396
    int idx, eidx;
2397

    
2398
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2399
        return -1;
2400
    idx = SUBPAGE_IDX(start);
2401
    eidx = SUBPAGE_IDX(end);
2402
#if defined(DEBUG_SUBPAGE)
2403
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2404
           mmio, start, end, idx, eidx, memory);
2405
#endif
2406
    memory >>= IO_MEM_SHIFT;
2407
    for (; idx <= eidx; idx++) {
2408
        mmio->mem_read[idx] = io_mem_read[memory];
2409
        mmio->mem_write[idx] = io_mem_write[memory];
2410
        mmio->opaque[idx] = io_mem_opaque[memory];
2411
    }
2412

    
2413
    return 0;
2414
}
2415

    
2416
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
2417
                           int orig_memory)
2418
{
2419
    subpage_t *mmio;
2420
    int subpage_memory;
2421

    
2422
    mmio = qemu_mallocz(sizeof(subpage_t));
2423
    if (mmio != NULL) {
2424
        mmio->base = base;
2425
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2426
#if defined(DEBUG_SUBPAGE)
2427
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2428
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2429
#endif
2430
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2431
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2432
    }
2433

    
2434
    return mmio;
2435
}
2436

    
2437
static void io_mem_init(void)
2438
{
2439
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2440
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2441
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2442
    io_mem_nb = 5;
2443

    
2444
#if defined(CONFIG_SOFTMMU)
2445
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
2446
                                          watch_mem_write, NULL);
2447
#endif
2448
    /* alloc dirty bits array */
2449
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2450
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2451
}
2452

    
2453
/* mem_read and mem_write are arrays of functions containing the
   functions to access the byte (index 0), word (index 1) and dword
   (index 2). All functions must be supplied. If io_index is positive,
   the corresponding I/O zone is modified. If it is zero (or negative),
   a new I/O zone is allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
2459
int cpu_register_io_memory(int io_index,
2460
                           CPUReadMemoryFunc **mem_read,
2461
                           CPUWriteMemoryFunc **mem_write,
2462
                           void *opaque)
2463
{
2464
    int i;
2465

    
2466
    if (io_index <= 0) {
2467
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2468
            return -1;
2469
        io_index = io_mem_nb++;
2470
    } else {
2471
        if (io_index >= IO_MEM_NB_ENTRIES)
2472
            return -1;
2473
    }
2474

    
2475
    for(i = 0;i < 3; i++) {
2476
        io_mem_read[io_index][i] = mem_read[i];
2477
        io_mem_write[io_index][i] = mem_write[i];
2478
    }
2479
    io_mem_opaque[io_index] = opaque;
2480
    return io_index << IO_MEM_SHIFT;
2481
}
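
/* For illustration, a device model would typically register its MMIO
   callbacks and then map them; 'mydev_*', 'opaque' and 'base_addr' are
   hypothetical names:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int iomemtype = cpu_register_io_memory(0, mydev_read,
                                              mydev_write, opaque);
       cpu_register_physical_memory(base_addr, 0x1000, iomemtype);
*/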
2482

    
2483
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2484
{
2485
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2486
}
2487

    
2488
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2489
{
2490
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2491
}
2492

    
2493
/* physical memory access (slow version, mainly for debug) */
2494
#if defined(CONFIG_USER_ONLY)
2495
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2496
                            int len, int is_write)
2497
{
2498
    int l, flags;
2499
    target_ulong page;
2500
    void * p;
2501

    
2502
    while (len > 0) {
2503
        page = addr & TARGET_PAGE_MASK;
2504
        l = (page + TARGET_PAGE_SIZE) - addr;
2505
        if (l > len)
2506
            l = len;
2507
        flags = page_get_flags(page);
2508
        if (!(flags & PAGE_VALID))
2509
            return;
2510
        if (is_write) {
2511
            if (!(flags & PAGE_WRITE))
2512
                return;
2513
            p = lock_user(addr, len, 0);
2514
            memcpy(p, buf, len);
2515
            unlock_user(p, addr, len);
2516
        } else {
2517
            if (!(flags & PAGE_READ))
2518
                return;
2519
            p = lock_user(addr, len, 1);
2520
            memcpy(buf, p, len);
2521
            unlock_user(p, addr, 0);
2522
        }
2523
        len -= l;
2524
        buf += l;
2525
        addr += l;
2526
    }
2527
}
2528

    
2529
#else
2530
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2531
                            int len, int is_write)
2532
{
2533
    int l, io_index;
2534
    uint8_t *ptr;
2535
    uint32_t val;
2536
    target_phys_addr_t page;
2537
    unsigned long pd;
2538
    PhysPageDesc *p;
2539

    
2540
    while (len > 0) {
2541
        page = addr & TARGET_PAGE_MASK;
2542
        l = (page + TARGET_PAGE_SIZE) - addr;
2543
        if (l > len)
2544
            l = len;
2545
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2546
        if (!p) {
2547
            pd = IO_MEM_UNASSIGNED;
2548
        } else {
2549
            pd = p->phys_offset;
2550
        }
2551

    
2552
        if (is_write) {
2553
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2554
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2555
                /* XXX: could force cpu_single_env to NULL to avoid
2556
                   potential bugs */
2557
                if (l >= 4 && ((addr & 3) == 0)) {
2558
                    /* 32 bit write access */
2559
                    val = ldl_p(buf);
2560
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2561
                    l = 4;
2562
                } else if (l >= 2 && ((addr & 1) == 0)) {
2563
                    /* 16 bit write access */
2564
                    val = lduw_p(buf);
2565
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2566
                    l = 2;
2567
                } else {
2568
                    /* 8 bit write access */
2569
                    val = ldub_p(buf);
2570
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2571
                    l = 1;
2572
                }
2573
            } else {
2574
                unsigned long addr1;
2575
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2576
                /* RAM case */
2577
                ptr = phys_ram_base + addr1;
2578
                memcpy(ptr, buf, l);
2579
                if (!cpu_physical_memory_is_dirty(addr1)) {
2580
                    /* invalidate code */
2581
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2582
                    /* set dirty bit */
2583
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2584
                        (0xff & ~CODE_DIRTY_FLAG);
2585
                }
2586
            }
2587
        } else {
2588
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2589
                !(pd & IO_MEM_ROMD)) {
2590
                /* I/O case */
2591
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2592
                if (l >= 4 && ((addr & 3) == 0)) {
2593
                    /* 32 bit read access */
2594
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2595
                    stl_p(buf, val);
2596
                    l = 4;
2597
                } else if (l >= 2 && ((addr & 1) == 0)) {
2598
                    /* 16 bit read access */
2599
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2600
                    stw_p(buf, val);
2601
                    l = 2;
2602
                } else {
2603
                    /* 8 bit read access */
2604
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2605
                    stb_p(buf, val);
2606
                    l = 1;
2607
                }
2608
            } else {
2609
                /* RAM case */
2610
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2611
                    (addr & ~TARGET_PAGE_MASK);
2612
                memcpy(buf, ptr, l);
2613
            }
2614
        }
2615
        len -= l;
2616
        buf += l;
2617
        addr += l;
2618
    }
2619
}
2620

    
2621
/* used for ROM loading: can write in RAM and ROM */
2622
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2623
                                   const uint8_t *buf, int len)
2624
{
2625
    int l;
2626
    uint8_t *ptr;
2627
    target_phys_addr_t page;
2628
    unsigned long pd;
2629
    PhysPageDesc *p;
2630

    
2631
    while (len > 0) {
2632
        page = addr & TARGET_PAGE_MASK;
2633
        l = (page + TARGET_PAGE_SIZE) - addr;
2634
        if (l > len)
2635
            l = len;
2636
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2637
        if (!p) {
2638
            pd = IO_MEM_UNASSIGNED;
2639
        } else {
2640
            pd = p->phys_offset;
2641
        }
2642

    
2643
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2644
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2645
            !(pd & IO_MEM_ROMD)) {
2646
            /* do nothing */
2647
        } else {
2648
            unsigned long addr1;
2649
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2650
            /* ROM/RAM case */
2651
            ptr = phys_ram_base + addr1;
2652
            memcpy(ptr, buf, l);
2653
        }
2654
        len -= l;
2655
        buf += l;
2656
        addr += l;
2657
    }
2658
}
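
/* For illustration (hypothetical buffers and addresses): firmware can be
   copied into a ROM region during machine init with

       cpu_physical_memory_write_rom(bios_addr, bios_buf, bios_size);

   while a DMA-capable device model uses

       cpu_physical_memory_rw(dma_addr, dma_buf, dma_len, 1);

   where the last argument selects write (1) or read (0). */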


/* warning: addr must be aligned */
2662
uint32_t ldl_phys(target_phys_addr_t addr)
2663
{
2664
    int io_index;
2665
    uint8_t *ptr;
2666
    uint32_t val;
2667
    unsigned long pd;
2668
    PhysPageDesc *p;
2669

    
2670
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2671
    if (!p) {
2672
        pd = IO_MEM_UNASSIGNED;
2673
    } else {
2674
        pd = p->phys_offset;
2675
    }
2676

    
2677
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2678
        !(pd & IO_MEM_ROMD)) {
2679
        /* I/O case */
2680
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2681
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2682
    } else {
2683
        /* RAM case */
2684
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2685
            (addr & ~TARGET_PAGE_MASK);
2686
        val = ldl_p(ptr);
2687
    }
2688
    return val;
2689
}
2690

    
2691
/* warning: addr must be aligned */
2692
uint64_t ldq_phys(target_phys_addr_t addr)
2693
{
2694
    int io_index;
2695
    uint8_t *ptr;
2696
    uint64_t val;
2697
    unsigned long pd;
2698
    PhysPageDesc *p;
2699

    
2700
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2701
    if (!p) {
2702
        pd = IO_MEM_UNASSIGNED;
2703
    } else {
2704
        pd = p->phys_offset;
2705
    }
2706

    
2707
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2708
        !(pd & IO_MEM_ROMD)) {
2709
        /* I/O case */
2710
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2711
#ifdef TARGET_WORDS_BIGENDIAN
2712
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2713
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2714
#else
2715
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2716
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2717
#endif
2718
    } else {
2719
        /* RAM case */
2720
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2721
            (addr & ~TARGET_PAGE_MASK);
2722
        val = ldq_p(ptr);
2723
    }
2724
    return val;
2725
}
2726

    
2727
/* XXX: optimize */
2728
uint32_t ldub_phys(target_phys_addr_t addr)
2729
{
2730
    uint8_t val;
2731
    cpu_physical_memory_read(addr, &val, 1);
2732
    return val;
2733
}
2734

    
2735
/* XXX: optimize */
2736
uint32_t lduw_phys(target_phys_addr_t addr)
2737
{
2738
    uint16_t val;
2739
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2740
    return tswap16(val);
2741
}
2742

    
2743
/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
2746
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2747
{
2748
    int io_index;
2749
    uint8_t *ptr;
2750
    unsigned long pd;
2751
    PhysPageDesc *p;
2752

    
2753
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2754
    if (!p) {
2755
        pd = IO_MEM_UNASSIGNED;
2756
    } else {
2757
        pd = p->phys_offset;
2758
    }
2759

    
2760
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2761
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2762
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2763
    } else {
2764
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2765
            (addr & ~TARGET_PAGE_MASK);
2766
        stl_p(ptr, val);
2767
    }
2768
}
2769

    
2770
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2771
{
2772
    int io_index;
2773
    uint8_t *ptr;
2774
    unsigned long pd;
2775
    PhysPageDesc *p;
2776

    
2777
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2778
    if (!p) {
2779
        pd = IO_MEM_UNASSIGNED;
2780
    } else {
2781
        pd = p->phys_offset;
2782
    }
2783

    
2784
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2785
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2786
#ifdef TARGET_WORDS_BIGENDIAN
2787
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2788
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2789
#else
2790
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2791
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2792
#endif
2793
    } else {
2794
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2795
            (addr & ~TARGET_PAGE_MASK);
2796
        stq_p(ptr, val);
2797
    }
2798
}
2799

    
2800
/* warning: addr must be aligned */
2801
void stl_phys(target_phys_addr_t addr, uint32_t val)
2802
{
2803
    int io_index;
2804
    uint8_t *ptr;
2805
    unsigned long pd;
2806
    PhysPageDesc *p;
2807

    
2808
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2809
    if (!p) {
2810
        pd = IO_MEM_UNASSIGNED;
2811
    } else {
2812
        pd = p->phys_offset;
2813
    }
2814

    
2815
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2816
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2817
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2818
    } else {
2819
        unsigned long addr1;
2820
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2821
        /* RAM case */
2822
        ptr = phys_ram_base + addr1;
2823
        stl_p(ptr, val);
2824
        if (!cpu_physical_memory_is_dirty(addr1)) {
2825
            /* invalidate code */
2826
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
2827
            /* set dirty bit */
2828
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2829
                (0xff & ~CODE_DIRTY_FLAG);
2830
        }
2831
    }
2832
}
2833

    
2834
/* XXX: optimize */
2835
void stb_phys(target_phys_addr_t addr, uint32_t val)
2836
{
2837
    uint8_t v = val;
2838
    cpu_physical_memory_write(addr, &v, 1);
2839
}
2840

    
2841
/* XXX: optimize */
2842
void stw_phys(target_phys_addr_t addr, uint32_t val)
2843
{
2844
    uint16_t v = tswap16(val);
2845
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
2846
}
2847

    
2848
/* XXX: optimize */
2849
void stq_phys(target_phys_addr_t addr, uint64_t val)
2850
{
2851
    val = tswap64(val);
2852
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
2853
}
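
/* These helpers suit code that walks guest-physical structures, e.g. a
   hypothetical page-table-entry update:

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | accessed_bit);

   stl_phys() would additionally set the dirty bits and invalidate any
   translated code on the page. */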
2854

    
2855
#endif
2856

    
2857
/* virtual memory access for debug */
2858
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
2859
                        uint8_t *buf, int len, int is_write)
2860
{
2861
    int l;
2862
    target_phys_addr_t phys_addr;
2863
    target_ulong page;
2864

    
2865
    while (len > 0) {
2866
        page = addr & TARGET_PAGE_MASK;
2867
        phys_addr = cpu_get_phys_page_debug(env, page);
2868
        /* if no physical page mapped, return an error */
2869
        if (phys_addr == -1)
2870
            return -1;
2871
        l = (page + TARGET_PAGE_SIZE) - addr;
2872
        if (l > len)
2873
            l = len;
2874
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
2875
                               buf, l, is_write);
2876
        len -= l;
2877
        buf += l;
2878
        addr += l;
2879
    }
2880
    return 0;
2881
}
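
/* For illustration, a debugger stub reads guest virtual memory like
   this (hypothetical variables); a negative return means the virtual
   address has no physical mapping:

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, pc, insn, sizeof(insn), 0) < 0)
           return -1;
*/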
2882

    
2883
void dump_exec_info(FILE *f,
2884
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2885
{
2886
    int i, target_code_size, max_target_code_size;
2887
    int direct_jmp_count, direct_jmp2_count, cross_page;
2888
    TranslationBlock *tb;
2889

    
2890
    target_code_size = 0;
2891
    max_target_code_size = 0;
2892
    cross_page = 0;
2893
    direct_jmp_count = 0;
2894
    direct_jmp2_count = 0;
2895
    for(i = 0; i < nb_tbs; i++) {
2896
        tb = &tbs[i];
2897
        target_code_size += tb->size;
2898
        if (tb->size > max_target_code_size)
2899
            max_target_code_size = tb->size;
2900
        if (tb->page_addr[1] != -1)
2901
            cross_page++;
2902
        if (tb->tb_next_offset[0] != 0xffff) {
2903
            direct_jmp_count++;
2904
            if (tb->tb_next_offset[1] != 0xffff) {
2905
                direct_jmp2_count++;
2906
            }
2907
        }
2908
    }
2909
    /* XXX: avoid using doubles ? */
2910
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
2911
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
2912
                nb_tbs ? target_code_size / nb_tbs : 0,
2913
                max_target_code_size);
2914
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
2915
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
2916
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
2917
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
2918
            cross_page,
2919
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
2920
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
2921
                direct_jmp_count,
2922
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
2923
                direct_jmp2_count,
2924
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
2925
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
2926
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
2927
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
2928
}
2929

    
2930
#if !defined(CONFIG_USER_ONLY)
2931

    
2932
#define MMUSUFFIX _cmmu
2933
#define GETPC() NULL
2934
#define env cpu_single_env
2935
#define SOFTMMU_CODE_ACCESS
2936

    
2937
#define SHIFT 0
2938
#include "softmmu_template.h"
2939

    
2940
#define SHIFT 1
2941
#include "softmmu_template.h"
2942

    
2943
#define SHIFT 2
2944
#include "softmmu_template.h"
2945

    
2946
#define SHIFT 3
2947
#include "softmmu_template.h"
2948

    
2949
#undef env
2950

    
2951
#endif