/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of write accesses to a given page; past a threshold, a code
       bitmap is built for it */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for the alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bit address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

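/* Make a host memory range executable. On Win32 this relies on
   VirtualProtect(); elsewhere the range is rounded out to host page
   boundaries and mprotect()ed read/write/exec, since the translator
   writes generated code into the same buffer it later executes. */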
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

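/* Determine the host and target page geometry and allocate the top
   level of the physical page table (l1_phys_map). In user mode, the
   host mappings listed in /proc/self/maps are marked PAGE_RESERVED so
   that guest mappings cannot collide with them. */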
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(TARGET_PAGE_ALIGN(startaddr),
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
    }
#endif
}

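/* Return the PageDesc for a target page index, allocating the L2
   table on first use. The map is a two-level table: the L1 entry
   selects an array of L2_SIZE PageDesc entries. */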
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

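/* Look up the PhysPageDesc for a physical page index. If 'alloc' is
   set, missing intermediate and leaf tables are created and the leaf
   entries are initialized to IO_MEM_UNASSIGNED; otherwise NULL is
   returned for unmapped indexes. */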
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

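/* Allocate the buffer that will hold the generated host code: either
   the static buffer (user mode) or an anonymous RWX mmap(). On x86-64
   Linux, MAP_32BIT keeps the buffer within reach of 32-bit jump
   offsets from the generated code. */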
void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

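/* Typical start-up sequence (a sketch; the exact call site depends on
 * the front end, e.g. vl.c for system emulation):
 *
 *     cpu_exec_init_all(0);        // 0 selects the default buffer size
 *     env = cpu_init(cpu_model);   // registers the CPU via cpu_exec_init()
 */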
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}

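/* Register a new CPU: append it to the global first_cpu list and
   assign it the next free cpu_index. */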
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

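/* Drop the SMC code bitmap of a page and reset its write counter so
   the bitmap can be rebuilt on demand. */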
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

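/* NOTE: in the page lists, the low 2 bits of a TranslationBlock
   pointer are a tag encoding which of the (up to two) physical pages
   of the TB the link belongs to. The tag must be masked off before
   dereferencing. */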
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

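/* Remove the jump 'n' of tb from the circular list of jumps pointing
   to its target. In this list, a pointer tagged with 2 marks the list
   head (the target TB itself). */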
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

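/* Remove a TB from every structure that references it: the physical
   hash table, the per-page TB lists, each CPU's tb_jmp_cache and the
   jump lists. Jumps from other TBs into this one are reset so that
   execution returns to the main loop instead. */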
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU tb_jmp_cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

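/* Set bits [start, start + len) in the bitmap 'tab'. Used to mark the
   bytes of a page that are covered by translated code. */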
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

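/* Build the code bitmap of a page from its TB list: each bit stands
   for one byte of the page covered by translated code, so that later
   writes can be checked against it cheaply. */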
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_write_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

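/* Unchain the outgoing jump 'n' of tb: remove tb from the circular
   jump list of its target, reset the jump in the generated code, then
   recurse into the target so a whole chain can be unlinked (used by
   cpu_interrupt() to get the CPU out of translated code). */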
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

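/* Compare the n-character prefix starting at s1 against the
   NUL-terminated string s2. */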
static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Returns 0 on error. */
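/* For instance, given the cpu_log_items table above (a sketch, not a
   call site in this file):
       cpu_str_to_log_mask("in_asm,op") == (CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP)
*/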
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

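/* Create a new CPUState that is a copy of 'env', preserving the new
   CPU's own position in the global CPU list and its cpu_index. */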
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

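/* If the TLB entry maps RAM inside [start, start + length), downgrade
   it to IO_MEM_NOTDIRTY so the next write takes the slow path and can
   set the dirty bits again. */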
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1597
                                         unsigned long start, unsigned long length)
1598
{
1599
    unsigned long addr;
1600
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1601
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1602
        if ((addr - start) < length) {
1603
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1604
        }
1605
    }
1606
}
1607

    
1608
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

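/* Usage sketch (illustrative, not from this file): a device that scans
   guest RAM, such as an emulated display adapter, typically polls a
   page's dirty state and clears only its own flag once the page has
   been handled:

       if (cpu_physical_memory_get_dirty(page_addr, VGA_DIRTY_FLAG)) {
           update_display_page(page_addr);
           cpu_physical_memory_reset_dirty(page_addr,
                                           page_addr + TARGET_PAGE_SIZE,
                                           VGA_DIRTY_FLAG);
       }

   cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are assumed to
   come from cpu-all.h; update_display_page() is a hypothetical helper.
   CODE_DIRTY_FLAG itself must stay under the control of the
   self-modifying-code machinery above. */
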
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

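/* Usage sketch (illustrative): a target's MMU fault handler resolves a
   guest virtual address to a physical page plus protection bits and
   then installs the mapping, roughly:

       int prot = PAGE_READ | PAGE_EXEC;
       if (pte_writable)
           prot |= PAGE_WRITE;
       return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                                paddr & TARGET_PAGE_MASK, prot,
                                mmu_idx, is_softmmu);

   'pte_writable' stands for whatever writability test the target
   defines; the real callers are the per-target cpu_handle_mmu_fault()
   style helpers. */
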
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

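/* Usage sketch (illustrative): user-mode syscall emulation validates a
   guest buffer before touching it, e.g. the access_ok() check in the
   linux-user code is built on this function:

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -TARGET_EFAULT;

   'guest_addr' and 'size' name the hypothetical buffer being checked;
   TARGET_EFAULT comes from the linux-user headers. */
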
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

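/* Worked example (illustrative, assuming 4 KB target pages): registering
   a region with start_addr = 0x1100 and orig_size = 0xe00 over the page
   at addr = 0x1000 takes both else branches, giving start_addr2 = 0x100
   and end_addr2 = 0xeff with need_subpage set, so only bytes
   0x100..0xeff of that page are redirected to the new handler. */
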
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || (phys_offset & IO_MEM_SUBWIDTH)) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || (phys_offset & IO_MEM_SUBWIDTH)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

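/* Usage sketch (illustrative, hypothetical board code): map RAM at the
   bottom of the physical address space and one MMIO page above it:

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(0x10000000, 0x1000, io);

   'mydev_read', 'mydev_write' and 's' are assumed device callback
   tables and state; the real examples live in the hw/ board files. */
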
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

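/* Note: these handlers sit behind the IO_MEM_NOTDIRTY slot that
   tlb_set_page_exec() installs for clean RAM pages. The first write to
   such a page is funnelled through here so any translated code on the
   page can be invalidated and the dirty bits set; once dirty_flags
   reads 0xff the TLB entry is switched back to plain RAM and later
   writes bypass these callbacks. */
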
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                        unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                            target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is positive, the corresponding io zone is
   modified. If it is zero or negative, a new io zone is allocated. The
   return value can be used with cpu_register_physical_memory();
   -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

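/* Usage sketch (illustrative): a device that only implements byte-wide
   registers passes NULL for the word and long callbacks; the returned
   value then carries IO_MEM_SUBWIDTH, which makes
   cpu_register_physical_memory() route the page through the subpage
   machinery above:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, NULL, NULL,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, NULL, NULL,
       };
       io = cpu_register_io_memory(0, mydev_read, mydev_write, s);

   'mydev_readb', 'mydev_writeb' and 's' are hypothetical names. */
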
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

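/* Usage sketch (illustrative): device emulation uses this function as
   its DMA primitive, e.g. copying an already-read disk sector into
   guest memory:

       uint8_t sector[512];
       cpu_physical_memory_write(dma_addr, sector, 512);

   cpu_physical_memory_read() and cpu_physical_memory_write() are the
   thin wrappers around this function declared in cpu-all.h; 'dma_addr'
   stands for the guest physical address programmed into the device. */
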
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

2815
uint32_t ldl_phys(target_phys_addr_t addr)
2816
{
2817
    int io_index;
2818
    uint8_t *ptr;
2819
    uint32_t val;
2820
    unsigned long pd;
2821
    PhysPageDesc *p;
2822

    
2823
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2824
    if (!p) {
2825
        pd = IO_MEM_UNASSIGNED;
2826
    } else {
2827
        pd = p->phys_offset;
2828
    }
2829

    
2830
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2831
        !(pd & IO_MEM_ROMD)) {
2832
        /* I/O case */
2833
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2834
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2835
    } else {
2836
        /* RAM case */
2837
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2838
            (addr & ~TARGET_PAGE_MASK);
2839
        val = ldl_p(ptr);
2840
    }
2841
    return val;
2842
}
2843

    
2844
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

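/* Usage sketch (illustrative): target MMU code uses the _notdirty
   variants when setting accessed/dirty bits in guest page tables, so
   that the update itself neither marks the page dirty nor invalidates
   translated code:

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);

   PG_ACCESSED_MASK is the x86 name for the accessed bit; other targets
   have their own equivalents. */
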
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

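/* Usage sketch (illustrative): the gdb stub reads and writes guest
   memory through this function so that unmapped pages fail cleanly:

       if (cpu_memory_rw_debug(env, gdb_addr, mem_buf, gdb_len, 0) != 0)
           send_error_reply();

   gdbstub.c is the real caller; 'send_error_reply' is a stand-in for
   its error path. */
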
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif