/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
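
/* The page tables are two-level radix trees over page indexes: the
   low L2_BITS of a page index select an entry inside an L2 array and
   the remaining high bits select the L1 slot holding that array.
   E.g. for a 32-bit target with 4 KB pages (TARGET_PAGE_BITS = 12),
   L1_BITS is 32 - 10 - 12 = 10, so both levels hold 1024 entries. */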

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
        return NULL;
#endif
    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

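/* TB list pointers carry a tag in their low two bits: in the
   page_next[] lists the tag is the index (0 or 1) of the TB page the
   link belongs to, in the jmp lists it selects the jump slot, and the
   value 2 marks jmp_first's pointer back to the owning TB, i.e. the
   end of the circular list.  Hence the recurring
   "n1 = (long)tb1 & 3" / "& ~3" unpacking below. */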
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the jump cache */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

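/* Set bits [start, start + len) in the bitmap 'tab'.  For example,
   set_bits(tab, 3, 7) sets bits 3..9: bits 3-7 of tab[0] via the
   head mask, then bits 0-1 of tab[1] via the tail mask. */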
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

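/* Build p->code_bitmap: one bit per byte of the guest page, set when
   that byte is covered by at least one TB.  tb_invalidate_phys_page_fast()
   consults it so that writes which touch no translated code can skip
   the expensive invalidation path. */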
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
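/* Because of that alignment requirement the bitmap probe below never
   crosses a byte boundary: the len tested bits always fall within a
   single code_bitmap byte. */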
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_io_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
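/* TBs are carved sequentially out of code_gen_buffer, so tbs[] is
   sorted by tc_ptr and a binary search is valid.  With no exact match,
   &tbs[m_max] is the TB with the greatest tc_ptr below the given
   address, i.e. the block containing it. */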
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0x8000;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
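/* For example, "in_asm,op" yields CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP,
   and "all" enables every entry of cpu_log_items. */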
int cpu_str_to_log_mask(const char *str)
1472
{
1473
    CPULogItem *item;
1474
    int mask;
1475
    const char *p, *p1;
1476

    
1477
    p = str;
1478
    mask = 0;
1479
    for(;;) {
1480
        p1 = strchr(p, ',');
1481
        if (!p1)
1482
            p1 = p + strlen(p);
1483
        if(cmp1(p,p1-p,"all")) {
1484
                for(item = cpu_log_items; item->mask != 0; item++) {
1485
                        mask |= item->mask;
1486
                }
1487
        } else {
1488
        for(item = cpu_log_items; item->mask != 0; item++) {
1489
            if (cmp1(p, p1 - p, item->name))
1490
                goto found;
1491
        }
1492
        return 0;
1493
        }
1494
    found:
1495
        mask |= item->mask;
1496
        if (*p1 != ',')
1497
            break;
1498
        p = p1 + 1;
1499
    }
1500
    return mask;
1501
}
1502

    
1503
void cpu_abort(CPUState *env, const char *fmt, ...)
1504
{
1505
    va_list ap;
1506
    va_list ap2;
1507

    
1508
    va_start(ap, fmt);
1509
    va_copy(ap2, ap);
1510
    fprintf(stderr, "qemu: fatal: ");
1511
    vfprintf(stderr, fmt, ap);
1512
    fprintf(stderr, "\n");
1513
#ifdef TARGET_I386
1514
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1515
#else
1516
    cpu_dump_state(env, stderr, fprintf, 0);
1517
#endif
1518
    if (logfile) {
1519
        fprintf(logfile, "qemu: fatal: ");
1520
        vfprintf(logfile, fmt, ap2);
1521
        fprintf(logfile, "\n");
1522
#ifdef TARGET_I386
1523
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1524
#else
1525
        cpu_dump_state(env, logfile, fprintf, 0);
1526
#endif
1527
        fflush(logfile);
1528
        fclose(logfile);
1529
    }
1530
    va_end(ap2);
1531
    va_end(ap);
1532
    abort();
1533
}
1534

    
1535
CPUState *cpu_copy(CPUState *env)
1536
{
1537
    CPUState *new_env = cpu_init(env->cpu_model_str);
1538
    /* preserve chaining and index */
1539
    CPUState *next_cpu = new_env->next_cpu;
1540
    int cpu_index = new_env->cpu_index;
1541
    memcpy(new_env, env, sizeof(CPUState));
1542
    new_env->next_cpu = next_cpu;
1543
    new_env->cpu_index = cpu_index;
1544
    return new_env;
1545
}
1546

    
1547
#if !defined(CONFIG_USER_ONLY)
1548

    
1549
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1550
{
1551
    unsigned int i;
1552

    
1553
    /* Discard jump cache entries for any tb which might potentially
1554
       overlap the flushed page.  */
1555
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1556
    memset (&env->tb_jmp_cache[i], 0, 
1557
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1558

    
1559
    i = tb_jmp_cache_hash_page(addr);
1560
    memset (&env->tb_jmp_cache[i], 0, 
1561
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1562
}
1563

    
1564
/* NOTE: if flush_global is true, also flush global entries (not
1565
   implemented yet) */
1566
void tlb_flush(CPUState *env, int flush_global)
1567
{
1568
    int i;
1569

    
1570
#if defined(DEBUG_TLB)
1571
    printf("tlb_flush:\n");
1572
#endif
1573
    /* must reset current TB so that interrupts cannot modify the
1574
       links while we are modifying them */
1575
    env->current_tb = NULL;
1576

    
1577
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1578
        env->tlb_table[0][i].addr_read = -1;
1579
        env->tlb_table[0][i].addr_write = -1;
1580
        env->tlb_table[0][i].addr_code = -1;
1581
        env->tlb_table[1][i].addr_read = -1;
1582
        env->tlb_table[1][i].addr_write = -1;
1583
        env->tlb_table[1][i].addr_code = -1;
1584
#if (NB_MMU_MODES >= 3)
1585
        env->tlb_table[2][i].addr_read = -1;
1586
        env->tlb_table[2][i].addr_write = -1;
1587
        env->tlb_table[2][i].addr_code = -1;
1588
#if (NB_MMU_MODES == 4)
1589
        env->tlb_table[3][i].addr_read = -1;
1590
        env->tlb_table[3][i].addr_write = -1;
1591
        env->tlb_table[3][i].addr_code = -1;
1592
#endif
1593
#endif
1594
    }
1595

    
1596
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1597

    
1598
#ifdef USE_KQEMU
1599
    if (env->kqemu_enabled) {
1600
        kqemu_flush(env, flush_global);
1601
    }
1602
#endif
1603
    tlb_flush_count++;
1604
}
1605

    
1606
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1607
{
1608
    if (addr == (tlb_entry->addr_read &
1609
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1610
        addr == (tlb_entry->addr_write &
1611
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1612
        addr == (tlb_entry->addr_code &
1613
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1614
        tlb_entry->addr_read = -1;
1615
        tlb_entry->addr_write = -1;
1616
        tlb_entry->addr_code = -1;
1617
    }
1618
}
1619

    
1620
void tlb_flush_page(CPUState *env, target_ulong addr)
1621
{
1622
    int i;
1623

    
1624
#if defined(DEBUG_TLB)
1625
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1626
#endif
1627
    /* must reset current TB so that interrupts cannot modify the
1628
       links while we are modifying them */
1629
    env->current_tb = NULL;
1630

    
1631
    addr &= TARGET_PAGE_MASK;
1632
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1633
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1634
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1635
#if (NB_MMU_MODES >= 3)
1636
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1637
#if (NB_MMU_MODES == 4)
1638
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1639
#endif
1640
#endif
1641

    
1642
    tlb_flush_jmp_cache(env, addr);
1643

    
1644
#ifdef USE_KQEMU
1645
    if (env->kqemu_enabled) {
1646
        kqemu_flush_page(env, addr);
1647
    }
1648
#endif
1649
}
1650

    
1651
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}

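/* Illustrative sketch (editor's addition): consumers of the dirty bitmap,
   such as a display device, scan their range and then clear the flag they
   own.  VGA_DIRTY_FLAG, the fb_ range and redraw_page() are assumptions:

       for (a = fb_start; a < fb_end; a += TARGET_PAGE_SIZE)
           if (cpu_physical_memory_get_dirty(a, VGA_DIRTY_FLAG))
               redraw_page(a);
       cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
*/
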
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}

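/* Illustrative sketch (editor's addition): a target's tlb_fill() handler
   resolves the guest fault and then installs the mapping through this
   function; the variable names follow the usual target helper code:

       ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                               paddr & TARGET_PAGE_MASK,
                               prot, mmu_idx, is_softmmu);
*/
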
#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

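/* Illustrative sketch (editor's addition): the user-mode mmap emulation
   marks freshly mapped guest pages roughly like this (the flags depend on
   the requested protection):

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
*/
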
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

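/* Illustrative sketch (editor's addition): a device model maps its MMIO
   range by registering handlers and binding the returned token to a
   physical range.  my_read, my_write, the opaque pointer s and the
   addresses are assumptions:

       int io = cpu_register_io_memory(0, my_read, my_write, s);
       cpu_register_physical_memory(0x10000000, 0x1000, io);
*/
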
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

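/* Illustrative sketch (editor's addition): board code pairs
   qemu_ram_alloc() with cpu_register_physical_memory() to back a guest
   physical range with RAM (sizes and addresses are made up):

       ram_addr_t off = qemu_ram_alloc(0x00800000);
       cpu_register_physical_memory(0x00000000, 0x00800000,
                                    off | IO_MEM_RAM);
*/
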
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

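/* Editor's note (exposition only): the notdirty handlers above are the
   write-fault slow path for clean pages.  A store traps into
   IO_MEM_NOTDIRTY, which invalidates any TBs derived from the page,
   performs the store, sets the dirty bits, and finally rearms the fast
   path through tlb_set_dirty() once no flag remains clear. */
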
/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
                && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

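/* Editor's note (exposition only): subpages handle the case where regions
   with different handlers share a single target page, e.g. a small device
   window inside an otherwise unassigned page.  Such pages are routed
   through subpage_read/subpage_write above, which dispatch on
   SUBPAGE_IDX(addr - mmio->base). */
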
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

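/* Illustrative sketch (editor's addition): typical registration with
   per-width handlers; all names are hypothetical:

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };
       int io = cpu_register_io_memory(0, my_read, my_write, opaque);
*/
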
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

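/* Illustrative sketch (editor's addition): DMA-capable device models go
   through the cpu_physical_memory_read()/write() wrappers around this
   function; desc_paddr is a hypothetical descriptor address:

       uint8_t desc[8];
       cpu_physical_memory_read(desc_paddr, desc, sizeof(desc));
*/
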
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

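/* Illustrative sketch (editor's addition): target MMU helpers use the
   notdirty stores when setting accessed/dirty bits in guest page table
   entries, so that the PTE write itself does not disturb dirty tracking;
   pte_addr is hypothetical and PG_ACCESSED_MASK follows the x86 helpers:

       stl_phys_notdirty(pte_addr, pte | PG_ACCESSED_MASK);
*/
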
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

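/* Illustrative sketch (editor's addition): the gdb stub reads guest
   virtual memory through this function when servicing a memory read
   packet, reporting a fault when it returns non-zero:

       if (cpu_memory_rw_debug(env, addr, buf, len, 0) != 0)
           put_packet(s, "E14");

   put_packet() and the error string follow gdbstub conventions. */
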
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

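/* Editor's note (exposition only): this dump backs the monitor's
   "info jit" command; any fprintf-compatible callback works, e.g.

       dump_exec_info(stderr, fprintf);
*/
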
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif