root / exec.c @ c8a706fe


/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
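/* Added note on sizing: with the defaults above, a 32-bit target with
   4 KiB pages (TARGET_PAGE_BITS == 12) gets L1_BITS == 32 - 10 - 12 == 10,
   so both levels hold 1024 entries and the two-level table spans the
   whole 4 GiB address space (1024 * 1024 pages of 4 KiB each). */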

static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
#if defined(CONFIG_SOFTMMU)
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

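/* map_exec(): make the given host memory range executable (as well as
   readable and writable), rounding out to host page boundaries on the
   POSIX side. Used below for the code generation buffer and prologue. */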
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

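/* Look up the PhysPageDesc for a physical page index, optionally
   allocating intermediate table levels on the way down. Targets with
   more than 32 physical address bits use an extra indirection level;
   otherwise l1_phys_map points directly at the L2 tables. */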
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
    io_mem_init();
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

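/* The TB lists below use tagged pointers: the low two bits of each
   link encode which page slot (0 or 1) the next TB occupies, and the
   value 2 marks the head/end of a circular jump list. Masking with ~3
   recovers the real pointer. */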
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from each CPU's jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

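/* set_bits(): mark bits [start, start + len) in the byte-array bitmap
   'tab'; the partial leading and trailing bytes are handled separately
   and whole bytes in between are filled directly. */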
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

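/* Build the bitmap of bytes in this page that are covered by
   translated code. Once a page has seen SMC_BITMAP_USE_THRESHOLD
   write faults, this bitmap lets tb_invalidate_phys_page_fast() skip
   writes that do not actually touch translated code. */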
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

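/* Translate a new TB at (cs_base, pc) with the given flags and cflags;
   only used here to regenerate the TB being modified with
   CF_SINGLE_INSN, so that the writing instruction cannot invalidate
   itself. */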
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
881
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
882
{
883
    PageDesc *p;
884
    int offset, b;
885
#if 0
886
    if (1) {
887
        if (loglevel) {
888
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
889
                   cpu_single_env->mem_write_vaddr, len,
890
                   cpu_single_env->eip,
891
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
892
        }
893
    }
894
#endif
895
    p = page_find(start >> TARGET_PAGE_BITS);
896
    if (!p)
897
        return;
898
    if (p->code_bitmap) {
899
        offset = start & ~TARGET_PAGE_MASK;
900
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
901
        if (b & ((1 << len) - 1))
902
            goto do_invalidate;
903
    } else {
904
    do_invalidate:
905
        tb_invalidate_phys_page_range(start, start + len, 1);
906
    }
907
}
908

    
909
#if !defined(CONFIG_SOFTMMU)
910
static void tb_invalidate_phys_page(target_phys_addr_t addr,
911
                                    unsigned long pc, void *puc)
912
{
913
    int n, current_flags, current_tb_modified;
914
    target_ulong current_pc, current_cs_base;
915
    PageDesc *p;
916
    TranslationBlock *tb, *current_tb;
917
#ifdef TARGET_HAS_PRECISE_SMC
918
    CPUState *env = cpu_single_env;
919
#endif
920

    
921
    addr &= TARGET_PAGE_MASK;
922
    p = page_find(addr >> TARGET_PAGE_BITS);
923
    if (!p)
924
        return;
925
    tb = p->first_tb;
926
    current_tb_modified = 0;
927
    current_tb = NULL;
928
    current_pc = 0; /* avoid warning */
929
    current_cs_base = 0; /* avoid warning */
930
    current_flags = 0; /* avoid warning */
931
#ifdef TARGET_HAS_PRECISE_SMC
932
    if (tb && pc != 0) {
933
        current_tb = tb_find_pc(pc);
934
    }
935
#endif
936
    while (tb != NULL) {
937
        n = (long)tb & 3;
938
        tb = (TranslationBlock *)((long)tb & ~3);
939
#ifdef TARGET_HAS_PRECISE_SMC
940
        if (current_tb == tb &&
941
            !(current_tb->cflags & CF_SINGLE_INSN)) {
942
                /* If we are modifying the current TB, we must stop
943
                   its execution. We could be more precise by checking
944
                   that the modification is after the current PC, but it
945
                   would require a specialized function to partially
946
                   restore the CPU state */
947

    
948
            current_tb_modified = 1;
949
            cpu_restore_state(current_tb, env, pc, puc);
950
#if defined(TARGET_I386)
951
            current_flags = env->hflags;
952
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
953
            current_cs_base = (target_ulong)env->segs[R_CS].base;
954
            current_pc = current_cs_base + env->eip;
955
#else
956
#error unsupported CPU
957
#endif
958
        }
959
#endif /* TARGET_HAS_PRECISE_SMC */
960
        tb_phys_invalidate(tb, addr);
961
        tb = tb->page_next[n];
962
    }
963
    p->first_tb = NULL;
964
#ifdef TARGET_HAS_PRECISE_SMC
965
    if (current_tb_modified) {
966
        /* we generate a block containing just the instruction
967
           modifying the memory. It will ensure that it cannot modify
968
           itself */
969
        env->current_tb = NULL;
970
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
971
                    CF_SINGLE_INSN);
972
        cpu_resume_from_signal(env, puc);
973
    }
974
#endif
975
}
976
#endif
977

    
/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

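/* Invalidate a single TLB entry, but only if it matches 'addr' in any
   of its read, write or code slots. */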
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

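/* Clear the given dirty flags for the physical address range
   [start, end) and redirect any RAM TLB write entries covering it to
   IO_MEM_NOTDIRTY, so that the next write re-marks the page dirty
   (and, for CODE_DIRTY_FLAG, goes through the SMC checks). */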
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

1696
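/* Illustrative sketch (not part of the build): a typical consumer of the
   dirty bitmap, e.g. a display adapter, scans its framebuffer pages with
   cpu_physical_memory_get_dirty() and then clears only the flag it owns
   (VGA_DIRTY_FLAG is assumed from cpu-all.h): */
#if 0
static void example_scan_dirty(ram_addr_t fb_start, ram_addr_t fb_end)
{
    ram_addr_t a;
    for (a = fb_start; a < fb_end; a += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
}
#endif
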
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we behave as if code was inside */
                        /* if code is present, we only map read-only and save
                           the original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

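/* Illustrative note (not part of the build): the low bits of the
   addr_read/addr_write/addr_code fields filled in above double as a tag.
   The generated fast path compares the page-masked access address against
   the field, so any entry carrying IO bits (an io index, IO_MEM_NOTDIRTY,
   io_mem_watch, ...) fails the compare and falls back to the slow path.
   A sketch of the comparison, with alignment bits omitted: */
#if 0
static int example_tlb_write_hit(CPUTLBEntry *te, target_ulong vaddr)
{
    return te->addr_write == (vaddr & TARGET_PAGE_MASK);
}
#endif
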
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

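/* Illustrative sketch (not part of the build): the user-mode mmap
   emulation is the main client of this flag API; a target mmap of an
   anonymous read/write region would roughly do: */
#if 0
static void example_target_mmap(target_ulong start, target_ulong len)
{
    /* PAGE_WRITE_ORG is added automatically by page_set_flags() */
    page_set_flags(start, start + len,
                   PAGE_VALID | PAGE_READ | PAGE_WRITE);
    /* later, a syscall handler can validate a guest buffer with: */
    if (page_check_range(start, len, PAGE_READ | PAGE_WRITE) < 0) {
        /* fault: return -EFAULT to the guest */
    }
}
#endif
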
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

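/* Illustrative sketch (not part of the build): board code hands this
   function the phys_offset encoding described above.  RAM passes a
   page-aligned offset into phys_ram_base (IO_MEM_RAM is 0), a device
   passes the value returned by cpu_register_io_memory(); my_io_index and
   the addresses are hypothetical: */
#if 0
static void example_board_map(ram_addr_t ram_size, int my_io_index)
{
    /* plain RAM at the bottom of the physical address space */
    cpu_register_physical_memory(0x00000000, ram_size, IO_MEM_RAM);
    /* one page of MMIO dispatched through the registered callbacks */
    cpu_register_physical_memory(0x10000000, 0x1000, my_io_index);
}
#endif
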
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

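/* Illustrative sketch (not part of the build): callers carve device RAM
   out of the preallocated phys_ram_base pool and then map it; since
   IO_MEM_RAM is 0, the page-aligned offset doubles as the phys_offset
   encoding for plain RAM: */
#if 0
static void example_alloc_vram(target_phys_addr_t map_base,
                               ram_addr_t vram_size)
{
    ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
    cpu_register_physical_memory(map_base, vram_size,
                                 vram_offset | IO_MEM_RAM);
}
#endif
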
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

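/* Illustrative note (not part of the build): each byte of phys_ram_dirty
   multiplexes several independent dirty trackers (code, VGA, migration,
   ...; the exact flag values live in cpu-all.h).  The invariant relied on
   above is that 0xff means "dirty for every tracker": */
#if 0
static int example_page_fully_dirty(ram_addr_t ram_addr)
{
    /* only then can the notdirty slow path be dropped from the TLB */
    return phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] == 0xff;
}
#endif
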
#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

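/* Illustrative note (not part of the build): a debugger front end arms a
   watchpoint through the CPU API and the TLB trick above does the rest.
   cpu_watchpoint_insert() is assumed here; it records vaddr in
   env->watchpoint[] and flushes the TLB page so the next access is
   re-filled by tlb_set_page_exec() with io_mem_watch: */
#if 0
static void example_arm_watchpoint(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr);
}
#endif
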
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

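/* Illustrative note (not part of the build): subpages are what let two
   devices share one target page.  Registering, say, a 512-byte region in
   the middle of an already-mapped page routes only those SUBPAGE_IDX
   slices to the new callbacks; cpu_register_physical_memory() detects the
   partial overlap via CHECK_SUBPAGE and ends up here internally: */
#if 0
static void example_split_page(target_phys_addr_t page, int dev_io_index)
{
    cpu_register_physical_memory(page + 0x200, 0x200, dev_io_index);
}
#endif
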
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

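/* Illustrative sketch (not part of the build) of the usual device
   pattern; MyDeviceState and the my_* callbacks are hypothetical: */
#if 0
static CPUReadMemoryFunc *my_read[3] = {
    my_readb, my_readw, my_readl,
};
static CPUWriteMemoryFunc *my_write[3] = {
    my_writeb, my_writew, my_writel,
};

static void example_register_device(MyDeviceState *s)
{
    int io = cpu_register_io_memory(0, my_read, my_write, s);
    cpu_register_physical_memory(0x10000000, 0x1000, io);
}
#endif
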
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

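/* Illustrative note (not part of the build): most callers go through the
   cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, e.g. a
   DMA engine copying a descriptor out of guest RAM: */
#if 0
static void example_dma_fetch(target_phys_addr_t desc_addr)
{
    uint8_t desc[16];
    cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
    /* ... decode the descriptor ... */
}
#endif
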
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

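/* Illustrative note (not part of the build): per the comment above, the
   _notdirty stores let target MMU helpers update accessed/dirty bits in a
   guest page table without flagging the backing page as modified, which
   matters when the dirty bitmap itself is used to track changed PTEs; the
   PTE layout below is hypothetical: */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr,
                                     uint32_t pte)
{
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical A bit */);
}
#endif
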
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

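/* Illustrative note (not part of the build): this is the entry point a
   debugger stub uses to service reads and writes on guest virtual
   addresses, e.g. for a gdb 'm' (read memory) packet: */
#if 0
static int example_debugger_read(CPUState *env, target_ulong vaddr,
                                 uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif
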
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif