root / exec.c @ ec6338ba


/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
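
/* [Editor's illustrative sketch -- not part of the original exec.c.]
 * l1_map below is a two-level table: the upper L1_BITS of a target page
 * index pick an L1 slot, the lower L2_BITS pick a PageDesc inside the
 * chunk hanging off that slot.  A minimal decomposition, mirroring
 * page_find() further down and assuming the usual 4 KB target pages
 * (TARGET_PAGE_BITS == 12), kept under #if 0 so it does not affect the
 * build: */
#if 0
static PageDesc *example_page_lookup(target_ulong addr)
{
    unsigned int index = addr >> TARGET_PAGE_BITS;  /* target page number  */
    unsigned int l1 = index >> L2_BITS;             /* upper L1_BITS bits  */
    unsigned int l2 = index & (L2_SIZE - 1);        /* lower L2_BITS bits  */
    PageDesc *chunk = l1_map[l1];                   /* NULL if never used  */
    return chunk ? chunk + l2 : NULL;
}
#endif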

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE];
    void *opaque[TARGET_PAGE_SIZE];
} subpage_t;

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
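
/* [Editor's illustrative sketch -- not part of the original exec.c.]
 * A PhysPageDesc.phys_offset packs a page-aligned RAM offset with an
 * io_index in the low bits (IO_MEM_RAM, i.e. 0, means ordinary RAM).
 * A minimal consumer of the physical page table, mirroring the checks
 * made in tlb_set_page_exec() later in this file; phys_page_find() is
 * defined just below: */
#if 0
static int example_phys_page_is_ram(target_phys_addr_t paddr)
{
    PhysPageDesc *p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    uint32_t pd = p ? p->phys_offset : IO_MEM_UNASSIGNED;
    return (pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM;  /* low bits select I/O */
}
#endif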

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}
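
/* [Editor's illustrative sketch -- not part of the original exec.c.]
 * The list walks above rely on pointer tagging: the two low bits of a
 * stored TranslationBlock pointer encode which jump slot (0 or 1) the
 * link belongs to, and the value 2 marks the head of the circular
 * jmp_first list.  TranslationBlock structures are at least 4-byte
 * aligned, so those bits are free.  A minimal tag/untag pair: */
#if 0
static TranslationBlock *example_tag(TranslationBlock *tb, unsigned int n)
{
    return (TranslationBlock *)((long)tb | n);        /* n is 0, 1 or 2  */
}

static TranslationBlock *example_untag(TranslationBlock *tagged,
                                       unsigned int *n)
{
    *n = (long)tagged & 3;                            /* recover the tag */
    return (TranslationBlock *)((long)tagged & ~3);   /* recover the ptr */
}
#endif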

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
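
/* [Editor's illustrative note -- not part of the original exec.c.]
 * set_bits() marks the bit range [start, start+len) in a byte bitmap
 * where bit i lives in tab[i >> 3] under mask 1 << (i & 7).  For
 * example, set_bits(tab, 5, 7) sets bits 5..11, i.e. tab[0] |= 0xe0 and
 * tab[1] |= 0x0f.  A one-bit-at-a-time reference version, useful for
 * checking the fast path above: */
#if 0
static void example_set_bits_slow(uint8_t *tab, int start, int len)
{
    int i;
    for (i = start; i < start + len; i++)
        tab[i >> 3] |= 1 << (i & 7);   /* same result as set_bits() */
}
#endif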

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
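
/* [Editor's illustrative sketch -- not part of the original exec.c.]
 * The softmmu "not dirty" write handlers defined later in this file call
 * tb_invalidate_phys_page_fast() before committing a store to a page
 * that still contains translated code.  Roughly, for a 4-byte store
 * (the real handler differs in detail): */
#if 0
static void example_notdirty_writel(ram_addr_t ram_addr, uint32_t val)
{
    if (!(phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & CODE_DIRTY_FLAG))
        tb_invalidate_phys_page_fast(ram_addr, 4);
    stl_p(phys_ram_base + ram_addr, val);  /* then perform the store */
}
#endif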

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
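
/* [Editor's illustrative note -- not part of the original exec.c.]
 * The binary search above works because tb_alloc() hands out tbs[]
 * entries in order and code_gen_ptr only grows between flushes, so
 * tbs[i].tc_ptr is sorted.  Typical use is mapping a host PC taken from
 * a signal handler back to guest state, roughly: */
#if 0
static void example_handle_code_fault(CPUState *env, unsigned long host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);
    if (tb) {
        /* re-derive the guest CPU state at the faulting instruction */
        cpu_restore_state(tb, env, host_pc, NULL);
    }
}
#endif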

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
                for(item = cpu_log_items; item->mask != 0; item++) {
                        mask |= item->mask;
                }
        } else {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
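
/* [Editor's illustrative sketch -- not part of the original exec.c.]
 * The two helpers above are how the "-d" command line option is wired
 * up elsewhere in QEMU: the option string is turned into a mask and
 * then installed.  Roughly (the "in_asm,op" value is just an example): */
#if 0
static void example_enable_logging(void)
{
    int mask = cpu_str_to_log_mask("in_asm,op");
    if (mask == 0) {
        fprintf(stderr, "unknown log item\n");
        return;
    }
    cpu_set_log(mask);            /* opens the log file on first use */
}
#endif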

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    if(env->intercept & INTERCEPT_SVM_MASK) {
        /* most probably the virtual machine should not
           be shut down but rather caught by the VMM */
        vmexit(SVM_EXIT_SHUTDOWN, 0);
    }
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init();
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}
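
/* [Editor's illustrative sketch -- not part of the original exec.c.]
 * Each env->tlb_table[mmu_idx] is a direct-mapped cache indexed by the
 * low bits of the virtual page number, which is why tlb_flush_page()
 * only needs to touch one slot per MMU mode.  A rough hit test for a
 * read access (the generated softmmu helpers do the equivalent check
 * inline): */
#if 0
static int example_tlb_read_hit(CPUState *env, int mmu_idx, target_ulong vaddr)
{
    int i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][i];
    return (vaddr & TARGET_PAGE_MASK) ==
           (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}
#endif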

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}
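
/* [Editor's illustrative sketch -- not part of the original exec.c.]
 * phys_ram_dirty keeps one byte of dirty flags per target page;
 * clearing a flag above also retargets cached TLB write entries to
 * IO_MEM_NOTDIRTY so the next store re-marks the page.  A minimal
 * "has this page been written since the flag was last reset?" check
 * (CODE_DIRTY_FLAG and the other dirty flags all work this way): */
#if 0
static int example_page_is_dirty(ram_addr_t ram_addr, int flag)
{
    return (phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] & flag) != 0;
}
#endif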
1537

    
1538
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1539
{
1540
    ram_addr_t ram_addr;
1541

    
1542
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1543
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1544
            tlb_entry->addend - (unsigned long)phys_ram_base;
1545
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1546
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
1547
        }
1548
    }
1549
}
1550

    
1551
/* update the TLB according to the current state of the dirty bits */
1552
void cpu_tlb_update_dirty(CPUState *env)
1553
{
1554
    int i;
1555
    for(i = 0; i < CPU_TLB_SIZE; i++)
1556
        tlb_update_dirty(&env->tlb_table[0][i]);
1557
    for(i = 0; i < CPU_TLB_SIZE; i++)
1558
        tlb_update_dirty(&env->tlb_table[1][i]);
1559
#if (NB_MMU_MODES >= 3)
1560
    for(i = 0; i < CPU_TLB_SIZE; i++)
1561
        tlb_update_dirty(&env->tlb_table[2][i]);
1562
#if (NB_MMU_MODES == 4)
1563
    for(i = 0; i < CPU_TLB_SIZE; i++)
1564
        tlb_update_dirty(&env->tlb_table[3][i]);
1565
#endif
1566
#endif
1567
}
1568

    
1569
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
1570
                                  unsigned long start)
1571
{
1572
    unsigned long addr;
1573
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
1574
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1575
        if (addr == start) {
1576
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
1577
        }
1578
    }
1579
}
1580

    
1581
/* update the TLB corresponding to virtual page vaddr and phys addr
1582
   addr so that it is no longer dirty */
1583
static inline void tlb_set_dirty(CPUState *env,
1584
                                 unsigned long addr, target_ulong vaddr)
1585
{
1586
    int i;
1587

    
1588
    addr &= TARGET_PAGE_MASK;
1589
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1590
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
1591
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
1592
#if (NB_MMU_MODES >= 3)
1593
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
1594
#if (NB_MMU_MODES == 4)
1595
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
1596
#endif
1597
#endif
1598
}
1599

    
1600
/* add a new TLB entry. At most one entry for a given virtual address
1601
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1602
   (can only happen in non SOFTMMU mode for I/O pages or pages
1603
   conflicting with the host address space). */
1604
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1605
                      target_phys_addr_t paddr, int prot,
1606
                      int mmu_idx, int is_softmmu)
1607
{
1608
    PhysPageDesc *p;
1609
    unsigned long pd;
1610
    unsigned int index;
1611
    target_ulong address;
1612
    target_phys_addr_t addend;
1613
    int ret;
1614
    CPUTLBEntry *te;
1615
    int i;
1616

    
1617
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1618
    if (!p) {
1619
        pd = IO_MEM_UNASSIGNED;
1620
    } else {
1621
        pd = p->phys_offset;
1622
    }
1623
#if defined(DEBUG_TLB)
1624
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1625
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1626
#endif
1627

    
1628
    ret = 0;
1629
#if !defined(CONFIG_SOFTMMU)
1630
    if (is_softmmu)
1631
#endif
1632
    {
1633
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1634
            /* IO memory case */
1635
            address = vaddr | pd;
1636
            addend = paddr;
1637
        } else {
1638
            /* standard memory */
1639
            address = vaddr;
1640
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1641
        }
1642

    
1643
        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }
        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapped physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if (!(p->flags & PAGE_READ) && (flags & PAGE_READ) )
            return -1;
        if (!(p->flags & PAGE_WRITE) && (flags & PAGE_WRITE) )
            return -1;
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
/* ??? This should be redundant now we have lock_user.  */
void page_unprotect_range(target_ulong data, target_ulong data_size)
{
    target_ulong start, end, addr;

    start = data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

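/* helpers for registering memory regions that only cover part of a
   target page: CHECK_SUBPAGE computes the offsets covered inside the
   page at 'addr' and sets need_subpage when a subpage io region is
   required */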
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory);
static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    unsigned long orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            unsigned long orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(unsigned int size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %u, max memory = %d)\n",
                size, phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

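/* handlers for accesses that hit no registered memory: optionally log
   them and, on targets that model it (SPARC, CRIS), raise the target
   specific unassigned access fault */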
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

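/* the NOTDIRTY handlers catch the first write to a RAM page whose
   dirty bits are still clear: they invalidate any translated code
   derived from the page, update the dirty bitmap and, once the page is
   fully dirty, let the TLB go back to direct RAM writes */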
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

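/* subpage dispatch: forward an access that falls inside a partially
   mapped page to the io handler registered for that offset; 'len' is
   the access size index (0 = byte, 1 = word, 2 = long) */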
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    CPUReadMemoryFunc **mem_read;
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    mem_read = mmio->mem_read[idx];
    ret = (*mem_read[len])(mmio->opaque[idx], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    CPUWriteMemoryFunc **mem_write;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    mem_write = mmio->mem_write[idx];
    (*mem_write[len])(mmio->opaque[idx], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

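/* point every subpage slot in [start, end] at the handlers and opaque
   pointer of the given io memory index */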
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             int memory)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        mmio->mem_read[idx] = io_mem_read[memory];
        mmio->mem_write[idx] = io_mem_write[memory];
        mmio->opaque[idx] = io_mem_opaque[memory];
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, uint32_t *phys,
                           int orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

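/* register the fixed io memory entries (ROM, unassigned, notdirty and,
   with softmmu, the watchpoint handler) and allocate the dirty bitmap */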
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero (or negative), a new
   io zone is allocated. The return value can be used with
   cpu_register_physical_memory(). (-1) is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            p = lock_user(addr, len, 0);
            memcpy(p, buf, len);
            unlock_user(p, addr, len);
        } else {
            if (!(flags & PAGE_READ))
                return;
            p = lock_user(addr, len, 1);
            memcpy(buf, p, len);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

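/* print translation cache statistics (TB count, code sizes, cross page
   and direct jump ratios, flush counters) through the given
   fprintf-like callback */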
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count            %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif