/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
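
/* Worked example (editorial sketch, assuming the common 4 KB target pages,
   i.e. TARGET_PAGE_BITS = 12): L1_BITS = 32 - 10 - 12 = 10, so both levels
   hold 1024 entries and a page index splits into (index >> L2_BITS) for the
   first level and (index & (L2_SIZE - 1)) for the second. */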

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}
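
/* Editorial note: a host page can span several target pages, e.g. a 64 KB
   host page covers sixteen 4 KB target pages; this is why tb_alloc_page()
   below walks a whole host page in TARGET_PAGE_SIZE steps before calling
   mprotect(). */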

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
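
/* Editorial note: when TARGET_PHYS_ADDR_SPACE_BITS <= 32 the #if above
   compiles the extra indexing step away and l1_phys_map acts as a plain
   one-level table of PhysPageDesc arrays; the additional level only exists
   for targets whose physical address space exceeds 32 bits. */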

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}
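
/* Editorial note: the page_next[] and jmp_next[] lists below tag each
   TranslationBlock pointer in its two low bits: 0 or 1 selects which of the
   TB's (at most two) physical pages, or which of its two jump slots, the
   link belongs to, and 2 marks the list head (tb->jmp_first points back to
   the TB itself with tag 2).  Hence the recurring decoding steps
   "n1 = (long)tb1 & 3" and "tb1 = ... & ~3". */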

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
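
/* Worked example (editorial): set_bits(tab, 3, 7) marks bits 3..9.  With
   start = 3 and end = 10 the first partial byte gets mask 0xff << 3 = 0xf8
   (bits 3..7), no full 0xff byte follows, and the trailing partial byte
   gets ~(0xff << (10 & 7)) = 0x03 (bits 8..9). */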

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
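
/* Editorial note: the bitmap holds one bit per byte of the target page
   (TARGET_PAGE_SIZE / 8 bytes in total), so a later write can be checked
   against translated code with a couple of shifts instead of walking the
   TB list; see tb_invalidate_phys_page_fast() below. */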

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
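
/* Editorial note: reading a single bitmap byte above is enough because the
   callers pass the power-of-two sizes of memory writes (1, 2, 4 or 8
   bytes) with start aligned to len, so the len bits of interest never
   straddle a byte boundary of the code_bitmap. */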

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
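
/* Editorial note: this works because TBs are carved out of code_gen_buffer
   in allocation order, so tbs[] is sorted by tc_ptr; when the loop falls
   through, m_max indexes the last TB whose tc_ptr is <= the searched
   address, i.e. the block containing it. */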

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
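
/* Usage sketch (editorial): cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" selects every entry of
   cpu_log_items[], and any unknown name makes the whole call return 0. */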

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}
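
/* Editorial note: the compare keeps TLB_INVALID_MASK in the masked value,
   so an entry that is already invalid (all bits set) can never match the
   page-aligned addr and is simply left alone. */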

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}
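
/* Editorial note: downgrading an entry to IO_MEM_NOTDIRTY does not unmap
   anything; it only forces the next write through the slow I/O path, which
   is where the dirty bits get set again (see the comment in
   cpu_physical_memory_reset_dirty() below). */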

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
1729
                if (map_addr == MAP_FAILED) {
1730
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
1731
                              paddr, vaddr);
1732
                }
1733
            }
1734
        }
1735
    }
1736
#endif
1737
    return ret;
1738
}
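
/* Note on the encoding used above: each addr_read/addr_write/addr_code
   field holds the virtual page address in its high bits, possibly OR'ed
   with an io_mem index (e.g. IO_MEM_NOTDIRTY, io_mem_watch) in the bits
   below TARGET_PAGE_MASK; any non-zero low bits force the access off
   the fast path and through the corresponding I/O callbacks. */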

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else
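
/* User-mode emulation (CONFIG_USER_ONLY): there is no software TLB, so
   the TLB operations are no-ops and page protection is handled directly
   with host mprotect() in page_unprotect() below. */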
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
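
/* Worked example: registering 0x80 bytes at start_addr = page_base + 0x40
   makes CHECK_SUBPAGE compute start_addr2 = 0x40 and end_addr2 = 0xbf on
   the first page, and sets need_subpage since the region covers neither
   the start nor the end of that page. */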

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
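
/* Typical board-level usage (illustrative sketch only; ram_size,
   bios_size, bios_offset and mmio_index are placeholders, with
   mmio_index obtained from cpu_register_io_memory() below):

       cpu_register_physical_memory(0x00000000, ram_size, 0);
       cpu_register_physical_memory(0x1fc00000, bios_size,
                                    bios_offset | IO_MEM_ROM);
       cpu_register_physical_memory(0x14000000, 0x1000, mmio_index);
*/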

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}
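
/* phys_ram_alloc_offset implements a simple bump allocator over the
   preallocated phys_ram_base area; this is why qemu_ram_free() below is
   a no-op: individual allocations cannot be returned yet. */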

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_lx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_lx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}
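
/* The three notdirty handlers above differ only in access size.  Each
   write first invalidates any translated code on the page (unless
   CODE_DIRTY_FLAG is already set), performs the store, then sets every
   dirty bit except CODE_DIRTY_FLAG; once dirty_flags reaches 0xff,
   i.e. no translated code remains on the page, the not-dirty TLB trap
   is removed via tlb_set_dirty(). */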

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}
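
/* In subpage_readlen/subpage_writelen, 'len' is the log2 of the access
   size (0 = byte, 1 = word, 2 = long), matching the indexing convention
   of the io_mem_read/io_mem_write dispatch tables. */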

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
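
/* A subpage splits a single target page into SUBPAGE_IDX-granular slots
   so that several io_mem handlers can coexist within one page: the
   page's phys_offset is replaced by the subpage's io index tagged with
   IO_MEM_SUBPAGE, and accesses then dispatch through the per-slot
   tables filled in by subpage_register(). */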

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is positive, the
   corresponding io zone is modified. If it is zero (or negative), a
   new io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
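
/* Example registration (illustrative sketch only; my_mem_read,
   my_mem_write and dev_state are hypothetical device-supplied handler
   tables and opaque state):

       int io = cpu_register_io_memory(0, my_mem_read, my_mem_write,
                                       dev_state);
       cpu_register_physical_memory(base_addr, TARGET_PAGE_SIZE, io);
*/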

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* only copy the part of the buffer that lies in the
               current page */
            p = lock_user(addr, l, 0);
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, l, 1);
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
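
/* I/O regions are dispatched at most 32 bits at a time, so the 64-bit
   case above is synthesized from two 32-bit reads, ordered according to
   the target's endianness. */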

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
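
/* Unlike stl_phys_notdirty() above, stl_phys() keeps the dirty bitmap
   and the translated code coherent after a RAM store, at the cost of a
   dirty-bit check on every access. */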

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)
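
/* softmmu_template.h is a multiple-inclusion template: with
   SOFTMMU_CODE_ACCESS defined, each SHIFT value (the log2 of the access
   size) instantiates the code-access load helpers, suffixed _cmmu, for
   1, 2, 4 and 8 byte accesses. */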
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif