exec.c @ 2e70f6ef

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#if defined(CONFIG_USER_ONLY)
41
#include <qemu.h>
42
#endif
43

    
44
//#define DEBUG_TB_INVALIDATE
45
//#define DEBUG_FLUSH
46
//#define DEBUG_TLB
47
//#define DEBUG_UNASSIGNED
48

    
49
/* make various TB consistency checks */
50
//#define DEBUG_TB_CHECK
51
//#define DEBUG_TLB_CHECK
52

    
53
//#define DEBUG_IOPORT
54
//#define DEBUG_SUBPAGE
55

    
56
#if !defined(CONFIG_USER_ONLY)
57
/* TB consistency checks only implemented for usermode emulation.  */
58
#undef DEBUG_TB_CHECK
59
#endif
60

    
61
#define SMC_BITMAP_USE_THRESHOLD 10
62

    
63
#define MMAP_AREA_START        0x00000000
64
#define MMAP_AREA_END          0xa8000000
65

    
66
#if defined(TARGET_SPARC64)
67
#define TARGET_PHYS_ADDR_SPACE_BITS 41
68
#elif defined(TARGET_SPARC)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 36
70
#elif defined(TARGET_ALPHA)
71
#define TARGET_PHYS_ADDR_SPACE_BITS 42
72
#define TARGET_VIRT_ADDR_SPACE_BITS 42
73
#elif defined(TARGET_PPC64)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 42
77
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
78
#define TARGET_PHYS_ADDR_SPACE_BITS 36
79
#else
80
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81
#define TARGET_PHYS_ADDR_SPACE_BITS 32
82
#endif
83

    
84
TranslationBlock *tbs;
85
int code_gen_max_blocks;
86
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87
int nb_tbs;
88
/* any access to the tbs or the page table must use this lock */
89
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90

    
91
uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92
uint8_t *code_gen_buffer;
93
unsigned long code_gen_buffer_size;
94
/* threshold to flush the translated code buffer */
95
unsigned long code_gen_buffer_max_size; 
96
uint8_t *code_gen_ptr;
97

    
98
#if !defined(CONFIG_USER_ONLY)
99
ram_addr_t phys_ram_size;
100
int phys_ram_fd;
101
uint8_t *phys_ram_base;
102
uint8_t *phys_ram_dirty;
103
static ram_addr_t phys_ram_alloc_offset = 0;
104
#endif
105

    
106
CPUState *first_cpu;
107
/* current CPU in the current thread. It is only valid inside
108
   cpu_exec() */
109
CPUState *cpu_single_env;
110
/* 0 = Do not count executed instructions.
111
   1 = Precise instruction counting.
112
   2 = Adaptive rate instruction counting.  */
113
int use_icount = 0;
114
/* Current instruction counter.  While executing translated code this may
115
   include some instructions that have not yet been executed.  */
116
int64_t qemu_icount;
117

    
118
typedef struct PageDesc {
119
    /* list of TBs intersecting this ram page */
120
    TranslationBlock *first_tb;
121
    /* in order to optimize self modifying code handling, we count the number
122
       of writes to a given page so we can switch to a bitmap */
123
    unsigned int code_write_count;
124
    uint8_t *code_bitmap;
125
#if defined(CONFIG_USER_ONLY)
126
    unsigned long flags;
127
#endif
128
} PageDesc;
129

    
130
typedef struct PhysPageDesc {
131
    /* offset in host memory of the page + io_index in the low bits */
132
    ram_addr_t phys_offset;
133
} PhysPageDesc;
134

    
135
#define L2_BITS 10
136
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
137
/* XXX: this is a temporary hack for alpha target.
138
 *      In the future, this is to be replaced by a multi-level table
139
 *      to actually be able to handle the complete 64 bits address space.
140
 */
141
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
142
#else
143
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
144
#endif
145

    
146
#define L1_SIZE (1 << L1_BITS)
147
#define L2_SIZE (1 << L2_BITS)
148

    
149
unsigned long qemu_real_host_page_size;
150
unsigned long qemu_host_page_bits;
151
unsigned long qemu_host_page_size;
152
unsigned long qemu_host_page_mask;
153

    
154
/* XXX: for system emulation, it could just be an array */
155
static PageDesc *l1_map[L1_SIZE];
156
PhysPageDesc **l1_phys_map;
157

    
158
#if !defined(CONFIG_USER_ONLY)
159
static void io_mem_init(void);
160

    
161
/* io memory support */
162
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
163
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
164
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
165
static int io_mem_nb;
166
static int io_mem_watch;
167
#endif
168

    
169
/* log support */
170
char *logfilename = "/tmp/qemu.log";
171
FILE *logfile;
172
int loglevel;
173
static int log_append = 0;
174

    
175
/* statistics */
176
static int tlb_flush_count;
177
static int tb_flush_count;
178
static int tb_phys_invalidate_count;
179

    
180
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
181
typedef struct subpage_t {
182
    target_phys_addr_t base;
183
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
184
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
185
    void *opaque[TARGET_PAGE_SIZE][2][4];
186
} subpage_t;
187

    
188
#ifdef _WIN32
189
static void map_exec(void *addr, long size)
190
{
191
    DWORD old_protect;
192
    VirtualProtect(addr, size,
193
                   PAGE_EXECUTE_READWRITE, &old_protect);
194
    
195
}
196
#else
197
static void map_exec(void *addr, long size)
198
{
199
    unsigned long start, end, page_size;
200
    
201
    page_size = getpagesize();
202
    start = (unsigned long)addr;
203
    start &= ~(page_size - 1);
204
    
205
    end = (unsigned long)addr + size;
206
    end += page_size - 1;
207
    end &= ~(page_size - 1);
208
    
209
    mprotect((void *)start, end - start,
210
             PROT_READ | PROT_WRITE | PROT_EXEC);
211
}
212
#endif
213

    
214
static void page_init(void)
215
{
216
    /* NOTE: we can always suppose that qemu_host_page_size >=
217
       TARGET_PAGE_SIZE */
218
#ifdef _WIN32
219
    {
220
        SYSTEM_INFO system_info;
221
        DWORD old_protect;
222

    
223
        GetSystemInfo(&system_info);
224
        qemu_real_host_page_size = system_info.dwPageSize;
225
    }
226
#else
227
    qemu_real_host_page_size = getpagesize();
228
#endif
229
    if (qemu_host_page_size == 0)
230
        qemu_host_page_size = qemu_real_host_page_size;
231
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
232
        qemu_host_page_size = TARGET_PAGE_SIZE;
233
    qemu_host_page_bits = 0;
234
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
235
        qemu_host_page_bits++;
236
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
237
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
238
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
239

    
240
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
241
    {
242
        long long startaddr, endaddr;
243
        FILE *f;
244
        int n;
245

    
246
        mmap_lock();
247
        last_brk = (unsigned long)sbrk(0);
248
        f = fopen("/proc/self/maps", "r");
249
        if (f) {
250
            do {
251
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
252
                if (n == 2) {
253
                    startaddr = MIN(startaddr,
254
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
255
                    endaddr = MIN(endaddr,
256
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
257
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
258
                                   TARGET_PAGE_ALIGN(endaddr),
259
                                   PAGE_RESERVED); 
260
                }
261
            } while (!feof(f));
262
            fclose(f);
263
        }
264
        mmap_unlock();
265
    }
266
#endif
267
}
268

    
269
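/* Return the PageDesc for the given target page index, allocating the
   second level array on demand.  The page table has two levels:
   l1_map[] holds L1_SIZE pointers to arrays of L2_SIZE PageDesc
   entries. */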
static inline PageDesc *page_find_alloc(target_ulong index)
270
{
271
    PageDesc **lp, *p;
272

    
273
#if TARGET_LONG_BITS > 32
274
    /* Host memory outside guest VM.  For 32-bit targets we have already
275
       excluded high addresses.  */
276
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
277
        return NULL;
278
#endif
279
    lp = &l1_map[index >> L2_BITS];
280
    p = *lp;
281
    if (!p) {
282
        /* allocate if not found */
283
#if defined(CONFIG_USER_ONLY)
284
        unsigned long addr;
285
        size_t len = sizeof(PageDesc) * L2_SIZE;
286
        /* Don't use qemu_malloc because it may recurse.  */
287
        p = mmap(0, len, PROT_READ | PROT_WRITE,
288
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
289
        *lp = p;
290
        addr = h2g(p);
291
        if (addr == (target_ulong)addr) {
292
            page_set_flags(addr & TARGET_PAGE_MASK,
293
                           TARGET_PAGE_ALIGN(addr + len),
294
                           PAGE_RESERVED); 
295
        }
296
#else
297
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
298
        *lp = p;
299
#endif
300
    }
301
    return p + (index & (L2_SIZE - 1));
302
}
303

    
304
static inline PageDesc *page_find(target_ulong index)
305
{
306
    PageDesc *p;
307

    
308
    p = l1_map[index >> L2_BITS];
309
    if (!p)
310
        return 0;
311
    return p + (index & (L2_SIZE - 1));
312
}
313

    
314
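/* Look up the PhysPageDesc for a physical page index, allocating
   intermediate levels if 'alloc' is set.  Depending on
   TARGET_PHYS_ADDR_SPACE_BITS the physical page table has one or two
   levels; newly allocated entries start as IO_MEM_UNASSIGNED. */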
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
315
{
316
    void **lp, **p;
317
    PhysPageDesc *pd;
318

    
319
    p = (void **)l1_phys_map;
320
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
321

    
322
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
323
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
324
#endif
325
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
326
    p = *lp;
327
    if (!p) {
328
        /* allocate if not found */
329
        if (!alloc)
330
            return NULL;
331
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
332
        memset(p, 0, sizeof(void *) * L1_SIZE);
333
        *lp = p;
334
    }
335
#endif
336
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
337
    pd = *lp;
338
    if (!pd) {
339
        int i;
340
        /* allocate if not found */
341
        if (!alloc)
342
            return NULL;
343
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
344
        *lp = pd;
345
        for (i = 0; i < L2_SIZE; i++)
346
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
347
    }
348
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
349
}
350

    
351
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
352
{
353
    return phys_page_find_alloc(index, 0);
354
}
355

    
356
#if !defined(CONFIG_USER_ONLY)
357
static void tlb_protect_code(ram_addr_t ram_addr);
358
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
359
                                    target_ulong vaddr);
360
#define mmap_lock() do { } while(0)
361
#define mmap_unlock() do { } while(0)
362
#endif
363

    
364
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
365

    
366
#if defined(CONFIG_USER_ONLY)
367
/* Currently it is not recommended to allocate big chunks of data in
368
   user mode. This will change once a dedicated libc is used. */
369
#define USE_STATIC_CODE_GEN_BUFFER
370
#endif
371

    
372
#ifdef USE_STATIC_CODE_GEN_BUFFER
373
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
374
#endif
375

    
376
void code_gen_alloc(unsigned long tb_size)
377
{
378
#ifdef USE_STATIC_CODE_GEN_BUFFER
379
    code_gen_buffer = static_code_gen_buffer;
380
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
381
    map_exec(code_gen_buffer, code_gen_buffer_size);
382
#else
383
    code_gen_buffer_size = tb_size;
384
    if (code_gen_buffer_size == 0) {
385
#if defined(CONFIG_USER_ONLY)
386
        /* in user mode, phys_ram_size is not meaningful */
387
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
388
#else
389
        /* XXX: needs adjustments */
390
        code_gen_buffer_size = (int)(phys_ram_size / 4);
391
#endif
392
    }
393
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
394
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
395
    /* The code gen buffer location may have constraints depending on
396
       the host cpu and OS */
397
#if defined(__linux__) 
398
    {
399
        int flags;
400
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
401
#if defined(__x86_64__)
402
        flags |= MAP_32BIT;
403
        /* Cannot map more than that */
404
        if (code_gen_buffer_size > (800 * 1024 * 1024))
405
            code_gen_buffer_size = (800 * 1024 * 1024);
406
#endif
407
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
408
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
409
                               flags, -1, 0);
410
        if (code_gen_buffer == MAP_FAILED) {
411
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
412
            exit(1);
413
        }
414
    }
415
#else
416
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
417
    if (!code_gen_buffer) {
418
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
419
        exit(1);
420
    }
421
    map_exec(code_gen_buffer, code_gen_buffer_size);
422
#endif
423
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
424
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
425
    code_gen_buffer_max_size = code_gen_buffer_size - 
426
        code_gen_max_block_size();
427
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
428
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
429
}
430

    
431
/* Must be called before using the QEMU cpus. 'tb_size' is the size
432
   (in bytes) allocated to the translation buffer. Zero means default
433
   size. */
434
void cpu_exec_init_all(unsigned long tb_size)
435
{
436
    cpu_gen_init();
437
    code_gen_alloc(tb_size);
438
    code_gen_ptr = code_gen_buffer;
439
    page_init();
440
#if !defined(CONFIG_USER_ONLY)
441
    io_mem_init();
442
#endif
443
}
444

    
445
void cpu_exec_init(CPUState *env)
446
{
447
    CPUState **penv;
448
    int cpu_index;
449

    
450
    env->next_cpu = NULL;
451
    penv = &first_cpu;
452
    cpu_index = 0;
453
    while (*penv != NULL) {
454
        penv = (CPUState **)&(*penv)->next_cpu;
455
        cpu_index++;
456
    }
457
    env->cpu_index = cpu_index;
458
    env->nb_watchpoints = 0;
459
    *penv = env;
460
}
461

    
462
static inline void invalidate_page_bitmap(PageDesc *p)
463
{
464
    if (p->code_bitmap) {
465
        qemu_free(p->code_bitmap);
466
        p->code_bitmap = NULL;
467
    }
468
    p->code_write_count = 0;
469
}
470

    
471
/* set to NULL all the 'first_tb' fields in all PageDescs */
472
static void page_flush_tb(void)
473
{
474
    int i, j;
475
    PageDesc *p;
476

    
477
    for(i = 0; i < L1_SIZE; i++) {
478
        p = l1_map[i];
479
        if (p) {
480
            for(j = 0; j < L2_SIZE; j++) {
481
                p->first_tb = NULL;
482
                invalidate_page_bitmap(p);
483
                p++;
484
            }
485
        }
486
    }
487
}
488

    
489
/* flush all the translation blocks */
490
/* XXX: tb_flush is currently not thread safe */
491
void tb_flush(CPUState *env1)
492
{
493
    CPUState *env;
494
#if defined(DEBUG_FLUSH)
495
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
496
           (unsigned long)(code_gen_ptr - code_gen_buffer),
497
           nb_tbs, nb_tbs > 0 ?
498
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
499
#endif
500
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
501
        cpu_abort(env1, "Internal error: code buffer overflow\n");
502

    
503
    nb_tbs = 0;
504

    
505
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
506
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
507
    }
508

    
509
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
510
    page_flush_tb();
511

    
512
    code_gen_ptr = code_gen_buffer;
513
    /* XXX: flush processor icache at this point if cache flush is
514
       expensive */
515
    tb_flush_count++;
516
}
517

    
518
#ifdef DEBUG_TB_CHECK
519

    
520
static void tb_invalidate_check(target_ulong address)
521
{
522
    TranslationBlock *tb;
523
    int i;
524
    address &= TARGET_PAGE_MASK;
525
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
526
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
527
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
528
                  address >= tb->pc + tb->size)) {
529
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
530
                       address, (long)tb->pc, tb->size);
531
            }
532
        }
533
    }
534
}
535

    
536
/* verify that all the pages have correct rights for code */
537
static void tb_page_check(void)
538
{
539
    TranslationBlock *tb;
540
    int i, flags1, flags2;
541

    
542
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
543
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
544
            flags1 = page_get_flags(tb->pc);
545
            flags2 = page_get_flags(tb->pc + tb->size - 1);
546
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
547
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
548
                       (long)tb->pc, tb->size, flags1, flags2);
549
            }
550
        }
551
    }
552
}
553

    
554
void tb_jmp_check(TranslationBlock *tb)
555
{
556
    TranslationBlock *tb1;
557
    unsigned int n1;
558

    
559
    /* suppress any remaining jumps to this TB */
560
    tb1 = tb->jmp_first;
561
    for(;;) {
562
        n1 = (long)tb1 & 3;
563
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
564
        if (n1 == 2)
565
            break;
566
        tb1 = tb1->jmp_next[n1];
567
    }
568
    /* check end of list */
569
    if (tb1 != tb) {
570
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
571
    }
572
}
573

    
574
#endif
575

    
576
/* invalidate one TB */
577
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
578
                             int next_offset)
579
{
580
    TranslationBlock *tb1;
581
    for(;;) {
582
        tb1 = *ptb;
583
        if (tb1 == tb) {
584
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
585
            break;
586
        }
587
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
588
    }
589
}
590

    
591
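/* Remove 'tb' from a page TB list.  TB list pointers carry a tag in
   their low two bits: 0 or 1 selects which page_next[]/jmp_next[] slot
   the link belongs to, and the value 2 marks the end of the circular
   jump list (jmp_first pointing back to the TB itself). */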
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
592
{
593
    TranslationBlock *tb1;
594
    unsigned int n1;
595

    
596
    for(;;) {
597
        tb1 = *ptb;
598
        n1 = (long)tb1 & 3;
599
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
600
        if (tb1 == tb) {
601
            *ptb = tb1->page_next[n1];
602
            break;
603
        }
604
        ptb = &tb1->page_next[n1];
605
    }
606
}
607

    
608
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
609
{
610
    TranslationBlock *tb1, **ptb;
611
    unsigned int n1;
612

    
613
    ptb = &tb->jmp_next[n];
614
    tb1 = *ptb;
615
    if (tb1) {
616
        /* find tb(n) in circular list */
617
        for(;;) {
618
            tb1 = *ptb;
619
            n1 = (long)tb1 & 3;
620
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
621
            if (n1 == n && tb1 == tb)
622
                break;
623
            if (n1 == 2) {
624
                ptb = &tb1->jmp_first;
625
            } else {
626
                ptb = &tb1->jmp_next[n1];
627
            }
628
        }
629
        /* now we can suppress tb(n) from the list */
630
        *ptb = tb->jmp_next[n];
631

    
632
        tb->jmp_next[n] = NULL;
633
    }
634
}
635

    
636
/* reset the jump entry 'n' of a TB so that it is not chained to
637
   another TB */
638
static inline void tb_reset_jump(TranslationBlock *tb, int n)
639
{
640
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
641
}
642

    
643
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
644
{
645
    CPUState *env;
646
    PageDesc *p;
647
    unsigned int h, n1;
648
    target_phys_addr_t phys_pc;
649
    TranslationBlock *tb1, *tb2;
650

    
651
    /* remove the TB from the hash list */
652
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
653
    h = tb_phys_hash_func(phys_pc);
654
    tb_remove(&tb_phys_hash[h], tb,
655
              offsetof(TranslationBlock, phys_hash_next));
656

    
657
    /* remove the TB from the page list */
658
    if (tb->page_addr[0] != page_addr) {
659
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
660
        tb_page_remove(&p->first_tb, tb);
661
        invalidate_page_bitmap(p);
662
    }
663
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
664
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
665
        tb_page_remove(&p->first_tb, tb);
666
        invalidate_page_bitmap(p);
667
    }
668

    
669
    tb_invalidated_flag = 1;
670

    
671
    /* remove the TB from the hash list */
672
    h = tb_jmp_cache_hash_func(tb->pc);
673
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
674
        if (env->tb_jmp_cache[h] == tb)
675
            env->tb_jmp_cache[h] = NULL;
676
    }
677

    
678
    /* suppress this TB from the two jump lists */
679
    tb_jmp_remove(tb, 0);
680
    tb_jmp_remove(tb, 1);
681

    
682
    /* suppress any remaining jumps to this TB */
683
    tb1 = tb->jmp_first;
684
    for(;;) {
685
        n1 = (long)tb1 & 3;
686
        if (n1 == 2)
687
            break;
688
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
689
        tb2 = tb1->jmp_next[n1];
690
        tb_reset_jump(tb1, n1);
691
        tb1->jmp_next[n1] = NULL;
692
        tb1 = tb2;
693
    }
694
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
695

    
696
    tb_phys_invalidate_count++;
697
}
698

    
699
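/* Set bits [start, start + len) in the bitmap 'tab'. */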
static inline void set_bits(uint8_t *tab, int start, int len)
700
{
701
    int end, mask, end1;
702

    
703
    end = start + len;
704
    tab += start >> 3;
705
    mask = 0xff << (start & 7);
706
    if ((start & ~7) == (end & ~7)) {
707
        if (start < end) {
708
            mask &= ~(0xff << (end & 7));
709
            *tab |= mask;
710
        }
711
    } else {
712
        *tab++ |= mask;
713
        start = (start + 8) & ~7;
714
        end1 = end & ~7;
715
        while (start < end1) {
716
            *tab++ = 0xff;
717
            start += 8;
718
        }
719
        if (start < end) {
720
            mask = ~(0xff << (end & 7));
721
            *tab |= mask;
722
        }
723
    }
724
}
725

    
726
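/* Build a bitmap with one bit per byte of the page, marking the bytes
   covered by at least one TB.  tb_invalidate_phys_page_fast() uses it
   to skip invalidation for writes that cannot touch translated code. */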
static void build_page_bitmap(PageDesc *p)
727
{
728
    int n, tb_start, tb_end;
729
    TranslationBlock *tb;
730

    
731
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
732
    if (!p->code_bitmap)
733
        return;
734

    
735
    tb = p->first_tb;
736
    while (tb != NULL) {
737
        n = (long)tb & 3;
738
        tb = (TranslationBlock *)((long)tb & ~3);
739
        /* NOTE: this is subtle as a TB may span two physical pages */
740
        if (n == 0) {
741
            /* NOTE: tb_end may be after the end of the page, but
742
               it is not a problem */
743
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
744
            tb_end = tb_start + tb->size;
745
            if (tb_end > TARGET_PAGE_SIZE)
746
                tb_end = TARGET_PAGE_SIZE;
747
        } else {
748
            tb_start = 0;
749
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
750
        }
751
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
752
        tb = tb->page_next[n];
753
    }
754
}
755

    
756
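/* Translate a new TB for (pc, cs_base, flags, cflags).  If the TB pool
   or the code buffer is exhausted, flush everything and retry;
   tb_invalidated_flag is set so that callers re-lookup cached TB
   pointers.  A TB crossing a page boundary is linked to both physical
   pages. */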
TranslationBlock *tb_gen_code(CPUState *env,
757
                              target_ulong pc, target_ulong cs_base,
758
                              int flags, int cflags)
759
{
760
    TranslationBlock *tb;
761
    uint8_t *tc_ptr;
762
    target_ulong phys_pc, phys_page2, virt_page2;
763
    int code_gen_size;
764

    
765
    phys_pc = get_phys_addr_code(env, pc);
766
    tb = tb_alloc(pc);
767
    if (!tb) {
768
        /* flush must be done */
769
        tb_flush(env);
770
        /* cannot fail at this point */
771
        tb = tb_alloc(pc);
772
        /* Don't forget to invalidate previous TB info.  */
773
        tb_invalidated_flag = 1;
774
    }
775
    tc_ptr = code_gen_ptr;
776
    tb->tc_ptr = tc_ptr;
777
    tb->cs_base = cs_base;
778
    tb->flags = flags;
779
    tb->cflags = cflags;
780
    cpu_gen_code(env, tb, &code_gen_size);
781
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
782

    
783
    /* check next page if needed */
784
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
785
    phys_page2 = -1;
786
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
787
        phys_page2 = get_phys_addr_code(env, virt_page2);
788
    }
789
    tb_link_phys(tb, phys_pc, phys_page2);
790
    return tb;
791
}
792

    
793
/* invalidate all TBs which intersect with the target physical page
794
   starting in range [start;end[. NOTE: start and end must refer to
795
   the same physical page. 'is_cpu_write_access' should be true if called
796
   from a real cpu write access: the virtual CPU will exit the current
797
   TB if code is modified inside this TB. */
798
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
799
                                   int is_cpu_write_access)
800
{
801
    int n, current_tb_modified, current_tb_not_found, current_flags;
802
    CPUState *env = cpu_single_env;
803
    PageDesc *p;
804
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
805
    target_ulong tb_start, tb_end;
806
    target_ulong current_pc, current_cs_base;
807

    
808
    p = page_find(start >> TARGET_PAGE_BITS);
809
    if (!p)
810
        return;
811
    if (!p->code_bitmap &&
812
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
813
        is_cpu_write_access) {
814
        /* build code bitmap */
815
        build_page_bitmap(p);
816
    }
817

    
818
    /* we remove all the TBs in the range [start, end[ */
819
    /* XXX: see if in some cases it could be faster to invalidate all the code */
820
    current_tb_not_found = is_cpu_write_access;
821
    current_tb_modified = 0;
822
    current_tb = NULL; /* avoid warning */
823
    current_pc = 0; /* avoid warning */
824
    current_cs_base = 0; /* avoid warning */
825
    current_flags = 0; /* avoid warning */
826
    tb = p->first_tb;
827
    while (tb != NULL) {
828
        n = (long)tb & 3;
829
        tb = (TranslationBlock *)((long)tb & ~3);
830
        tb_next = tb->page_next[n];
831
        /* NOTE: this is subtle as a TB may span two physical pages */
832
        if (n == 0) {
833
            /* NOTE: tb_end may be after the end of the page, but
834
               it is not a problem */
835
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
836
            tb_end = tb_start + tb->size;
837
        } else {
838
            tb_start = tb->page_addr[1];
839
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
840
        }
841
        if (!(tb_end <= start || tb_start >= end)) {
842
#ifdef TARGET_HAS_PRECISE_SMC
843
            if (current_tb_not_found) {
844
                current_tb_not_found = 0;
845
                current_tb = NULL;
846
                if (env->mem_io_pc) {
847
                    /* now we have a real cpu fault */
848
                    current_tb = tb_find_pc(env->mem_io_pc);
849
                }
850
            }
851
            if (current_tb == tb &&
852
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
853
                /* If we are modifying the current TB, we must stop
854
                its execution. We could be more precise by checking
855
                that the modification is after the current PC, but it
856
                would require a specialized function to partially
857
                restore the CPU state */
858

    
859
                current_tb_modified = 1;
860
                cpu_restore_state(current_tb, env,
861
                                  env->mem_io_pc, NULL);
862
#if defined(TARGET_I386)
863
                current_flags = env->hflags;
864
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
865
                current_cs_base = (target_ulong)env->segs[R_CS].base;
866
                current_pc = current_cs_base + env->eip;
867
#else
868
#error unsupported CPU
869
#endif
870
            }
871
#endif /* TARGET_HAS_PRECISE_SMC */
872
            /* we need to do that to handle the case where a signal
873
               occurs while doing tb_phys_invalidate() */
874
            saved_tb = NULL;
875
            if (env) {
876
                saved_tb = env->current_tb;
877
                env->current_tb = NULL;
878
            }
879
            tb_phys_invalidate(tb, -1);
880
            if (env) {
881
                env->current_tb = saved_tb;
882
                if (env->interrupt_request && env->current_tb)
883
                    cpu_interrupt(env, env->interrupt_request);
884
            }
885
        }
886
        tb = tb_next;
887
    }
888
#if !defined(CONFIG_USER_ONLY)
889
    /* if no code remaining, no need to continue to use slow writes */
890
    if (!p->first_tb) {
891
        invalidate_page_bitmap(p);
892
        if (is_cpu_write_access) {
893
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
894
        }
895
    }
896
#endif
897
#ifdef TARGET_HAS_PRECISE_SMC
898
    if (current_tb_modified) {
899
        /* we generate a block containing just the instruction
900
           modifying the memory. It will ensure that it cannot modify
901
           itself */
902
        env->current_tb = NULL;
903
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
904
        cpu_resume_from_signal(env, NULL);
905
    }
906
#endif
907
}
908

    
909
/* len must be <= 8 and start must be a multiple of len */
910
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
911
{
912
    PageDesc *p;
913
    int offset, b;
914
#if 0
915
    if (1) {
916
        if (loglevel) {
917
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
918
                   cpu_single_env->mem_io_vaddr, len,
919
                   cpu_single_env->eip,
920
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
921
        }
922
    }
923
#endif
924
    p = page_find(start >> TARGET_PAGE_BITS);
925
    if (!p)
926
        return;
927
    if (p->code_bitmap) {
928
        offset = start & ~TARGET_PAGE_MASK;
929
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
930
        if (b & ((1 << len) - 1))
931
            goto do_invalidate;
932
    } else {
933
    do_invalidate:
934
        tb_invalidate_phys_page_range(start, start + len, 1);
935
    }
936
}
937

    
938
#if !defined(CONFIG_SOFTMMU)
939
static void tb_invalidate_phys_page(target_phys_addr_t addr,
940
                                    unsigned long pc, void *puc)
941
{
942
    int n, current_flags, current_tb_modified;
943
    target_ulong current_pc, current_cs_base;
944
    PageDesc *p;
945
    TranslationBlock *tb, *current_tb;
946
#ifdef TARGET_HAS_PRECISE_SMC
947
    CPUState *env = cpu_single_env;
948
#endif
949

    
950
    addr &= TARGET_PAGE_MASK;
951
    p = page_find(addr >> TARGET_PAGE_BITS);
952
    if (!p)
953
        return;
954
    tb = p->first_tb;
955
    current_tb_modified = 0;
956
    current_tb = NULL;
957
    current_pc = 0; /* avoid warning */
958
    current_cs_base = 0; /* avoid warning */
959
    current_flags = 0; /* avoid warning */
960
#ifdef TARGET_HAS_PRECISE_SMC
961
    if (tb && pc != 0) {
962
        current_tb = tb_find_pc(pc);
963
    }
964
#endif
965
    while (tb != NULL) {
966
        n = (long)tb & 3;
967
        tb = (TranslationBlock *)((long)tb & ~3);
968
#ifdef TARGET_HAS_PRECISE_SMC
969
        if (current_tb == tb &&
970
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
971
                /* If we are modifying the current TB, we must stop
972
                   its execution. We could be more precise by checking
973
                   that the modification is after the current PC, but it
974
                   would require a specialized function to partially
975
                   restore the CPU state */
976

    
977
            current_tb_modified = 1;
978
            cpu_restore_state(current_tb, env, pc, puc);
979
#if defined(TARGET_I386)
980
            current_flags = env->hflags;
981
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
982
            current_cs_base = (target_ulong)env->segs[R_CS].base;
983
            current_pc = current_cs_base + env->eip;
984
#else
985
#error unsupported CPU
986
#endif
987
        }
988
#endif /* TARGET_HAS_PRECISE_SMC */
989
        tb_phys_invalidate(tb, addr);
990
        tb = tb->page_next[n];
991
    }
992
    p->first_tb = NULL;
993
#ifdef TARGET_HAS_PRECISE_SMC
994
    if (current_tb_modified) {
995
        /* we generate a block containing just the instruction
996
           modifying the memory. It will ensure that it cannot modify
997
           itself */
998
        env->current_tb = NULL;
999
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1000
        cpu_resume_from_signal(env, puc);
1001
    }
1002
#endif
1003
}
1004
#endif
1005

    
1006
/* add the tb in the target page and protect it if necessary */
1007
static inline void tb_alloc_page(TranslationBlock *tb,
1008
                                 unsigned int n, target_ulong page_addr)
1009
{
1010
    PageDesc *p;
1011
    TranslationBlock *last_first_tb;
1012

    
1013
    tb->page_addr[n] = page_addr;
1014
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1015
    tb->page_next[n] = p->first_tb;
1016
    last_first_tb = p->first_tb;
1017
    p->first_tb = (TranslationBlock *)((long)tb | n);
1018
    invalidate_page_bitmap(p);
1019

    
1020
#if defined(TARGET_HAS_SMC) || 1
1021

    
1022
#if defined(CONFIG_USER_ONLY)
1023
    if (p->flags & PAGE_WRITE) {
1024
        target_ulong addr;
1025
        PageDesc *p2;
1026
        int prot;
1027

    
1028
        /* force the host page to be non-writable (writes will have a
1029
           page fault + mprotect overhead) */
1030
        page_addr &= qemu_host_page_mask;
1031
        prot = 0;
1032
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1033
            addr += TARGET_PAGE_SIZE) {
1034

    
1035
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1036
            if (!p2)
1037
                continue;
1038
            prot |= p2->flags;
1039
            p2->flags &= ~PAGE_WRITE;
1040
            page_get_flags(addr);
1041
        }
1042
        mprotect(g2h(page_addr), qemu_host_page_size,
1043
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1044
#ifdef DEBUG_TB_INVALIDATE
1045
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1046
               page_addr);
1047
#endif
1048
    }
1049
#else
1050
    /* if some code is already present, then the pages are already
1051
       protected. So we handle the case where only the first TB is
1052
       allocated in a physical page */
1053
    if (!last_first_tb) {
1054
        tlb_protect_code(page_addr);
1055
    }
1056
#endif
1057

    
1058
#endif /* TARGET_HAS_SMC */
1059
}
1060

    
1061
/* Allocate a new translation block. Flush the translation buffer if
1062
   too many translation blocks or too much generated code. */
1063
TranslationBlock *tb_alloc(target_ulong pc)
1064
{
1065
    TranslationBlock *tb;
1066

    
1067
    if (nb_tbs >= code_gen_max_blocks ||
1068
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1069
        return NULL;
1070
    tb = &tbs[nb_tbs++];
1071
    tb->pc = pc;
1072
    tb->cflags = 0;
1073
    return tb;
1074
}
1075

    
1076
void tb_free(TranslationBlock *tb)
1077
{
1078
    /* In practice this is mostly used for single-use temporary TBs.
1079
       Ignore the hard cases and just back up if this TB happens to
1080
       be the last one generated.  */
1081
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1082
        code_gen_ptr = tb->tc_ptr;
1083
        nb_tbs--;
1084
    }
1085
}
1086

    
1087
/* add a new TB and link it to the physical page tables. phys_page2 is
1088
   (-1) to indicate that only one page contains the TB. */
1089
void tb_link_phys(TranslationBlock *tb,
1090
                  target_ulong phys_pc, target_ulong phys_page2)
1091
{
1092
    unsigned int h;
1093
    TranslationBlock **ptb;
1094

    
1095
    /* Grab the mmap lock to stop another thread invalidating this TB
1096
       before we are done.  */
1097
    mmap_lock();
1098
    /* add in the physical hash table */
1099
    h = tb_phys_hash_func(phys_pc);
1100
    ptb = &tb_phys_hash[h];
1101
    tb->phys_hash_next = *ptb;
1102
    *ptb = tb;
1103

    
1104
    /* add in the page list */
1105
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1106
    if (phys_page2 != -1)
1107
        tb_alloc_page(tb, 1, phys_page2);
1108
    else
1109
        tb->page_addr[1] = -1;
1110

    
1111
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1112
    tb->jmp_next[0] = NULL;
1113
    tb->jmp_next[1] = NULL;
1114

    
1115
    /* init original jump addresses */
1116
    if (tb->tb_next_offset[0] != 0xffff)
1117
        tb_reset_jump(tb, 0);
1118
    if (tb->tb_next_offset[1] != 0xffff)
1119
        tb_reset_jump(tb, 1);
1120

    
1121
#ifdef DEBUG_TB_CHECK
1122
    tb_page_check();
1123
#endif
1124
    mmap_unlock();
1125
}
1126

    
1127
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1128
   tb[1].tc_ptr. Return NULL if not found */
1129
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1130
{
1131
    int m_min, m_max, m;
1132
    unsigned long v;
1133
    TranslationBlock *tb;
1134

    
1135
    if (nb_tbs <= 0)
1136
        return NULL;
1137
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1138
        tc_ptr >= (unsigned long)code_gen_ptr)
1139
        return NULL;
1140
    /* binary search (cf Knuth) */
1141
    m_min = 0;
1142
    m_max = nb_tbs - 1;
1143
    while (m_min <= m_max) {
1144
        m = (m_min + m_max) >> 1;
1145
        tb = &tbs[m];
1146
        v = (unsigned long)tb->tc_ptr;
1147
        if (v == tc_ptr)
1148
            return tb;
1149
        else if (tc_ptr < v) {
1150
            m_max = m - 1;
1151
        } else {
1152
            m_min = m + 1;
1153
        }
1154
    }
1155
    return &tbs[m_max];
1156
}
1157

    
1158
static void tb_reset_jump_recursive(TranslationBlock *tb);
1159

    
1160
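/* Unchain jump slot 'n' of 'tb': remove tb from the destination TB's
   circular jmp_first list, reset the jump in the generated code so it
   no longer chains directly to another TB, then recursively unchain
   the destination TB as well. */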
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1161
{
1162
    TranslationBlock *tb1, *tb_next, **ptb;
1163
    unsigned int n1;
1164

    
1165
    tb1 = tb->jmp_next[n];
1166
    if (tb1 != NULL) {
1167
        /* find head of list */
1168
        for(;;) {
1169
            n1 = (long)tb1 & 3;
1170
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1171
            if (n1 == 2)
1172
                break;
1173
            tb1 = tb1->jmp_next[n1];
1174
        }
1175
        /* we are now sure that tb jumps to tb1 */
1176
        tb_next = tb1;
1177

    
1178
        /* remove tb from the jmp_first list */
1179
        ptb = &tb_next->jmp_first;
1180
        for(;;) {
1181
            tb1 = *ptb;
1182
            n1 = (long)tb1 & 3;
1183
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1184
            if (n1 == n && tb1 == tb)
1185
                break;
1186
            ptb = &tb1->jmp_next[n1];
1187
        }
1188
        *ptb = tb->jmp_next[n];
1189
        tb->jmp_next[n] = NULL;
1190

    
1191
        /* suppress the jump to next tb in generated code */
1192
        tb_reset_jump(tb, n);
1193

    
1194
        /* suppress jumps in the tb on which we could have jumped */
1195
        tb_reset_jump_recursive(tb_next);
1196
    }
1197
}
1198

    
1199
static void tb_reset_jump_recursive(TranslationBlock *tb)
1200
{
1201
    tb_reset_jump_recursive2(tb, 0);
1202
    tb_reset_jump_recursive2(tb, 1);
1203
}
1204

    
1205
#if defined(TARGET_HAS_ICE)
1206
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1207
{
1208
    target_phys_addr_t addr;
1209
    target_ulong pd;
1210
    ram_addr_t ram_addr;
1211
    PhysPageDesc *p;
1212

    
1213
    addr = cpu_get_phys_page_debug(env, pc);
1214
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1215
    if (!p) {
1216
        pd = IO_MEM_UNASSIGNED;
1217
    } else {
1218
        pd = p->phys_offset;
1219
    }
1220
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1221
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1222
}
1223
#endif
1224

    
1225
/* Add a watchpoint.  */
1226
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1227
{
1228
    int i;
1229

    
1230
    for (i = 0; i < env->nb_watchpoints; i++) {
1231
        if (addr == env->watchpoint[i].vaddr)
1232
            return 0;
1233
    }
1234
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1235
        return -1;
1236

    
1237
    i = env->nb_watchpoints++;
1238
    env->watchpoint[i].vaddr = addr;
1239
    env->watchpoint[i].type = type;
1240
    tlb_flush_page(env, addr);
1241
    /* FIXME: This flush is needed because of the hack to make memory ops
1242
       terminate the TB.  It can be removed once the proper IO trap and
1243
       re-execute bits are in.  */
1244
    tb_flush(env);
1245
    return i;
1246
}
1247

    
1248
/* Remove a watchpoint.  */
1249
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1250
{
1251
    int i;
1252

    
1253
    for (i = 0; i < env->nb_watchpoints; i++) {
1254
        if (addr == env->watchpoint[i].vaddr) {
1255
            env->nb_watchpoints--;
1256
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1257
            tlb_flush_page(env, addr);
1258
            return 0;
1259
        }
1260
    }
1261
    return -1;
1262
}
1263

    
1264
/* Remove all watchpoints. */
1265
void cpu_watchpoint_remove_all(CPUState *env) {
1266
    int i;
1267

    
1268
    for (i = 0; i < env->nb_watchpoints; i++) {
1269
        tlb_flush_page(env, env->watchpoint[i].vaddr);
1270
    }
1271
    env->nb_watchpoints = 0;
1272
}
1273

    
1274
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1275
   breakpoint is reached */
1276
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1277
{
1278
#if defined(TARGET_HAS_ICE)
1279
    int i;
1280

    
1281
    for(i = 0; i < env->nb_breakpoints; i++) {
1282
        if (env->breakpoints[i] == pc)
1283
            return 0;
1284
    }
1285

    
1286
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1287
        return -1;
1288
    env->breakpoints[env->nb_breakpoints++] = pc;
1289

    
1290
    breakpoint_invalidate(env, pc);
1291
    return 0;
1292
#else
1293
    return -1;
1294
#endif
1295
}
1296

    
1297
/* remove all breakpoints */
1298
void cpu_breakpoint_remove_all(CPUState *env) {
1299
#if defined(TARGET_HAS_ICE)
1300
    int i;
1301
    for(i = 0; i < env->nb_breakpoints; i++) {
1302
        breakpoint_invalidate(env, env->breakpoints[i]);
1303
    }
1304
    env->nb_breakpoints = 0;
1305
#endif
1306
}
1307

    
1308
/* remove a breakpoint */
1309
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1310
{
1311
#if defined(TARGET_HAS_ICE)
1312
    int i;
1313
    for(i = 0; i < env->nb_breakpoints; i++) {
1314
        if (env->breakpoints[i] == pc)
1315
            goto found;
1316
    }
1317
    return -1;
1318
 found:
1319
    env->nb_breakpoints--;
1320
    if (i < env->nb_breakpoints)
1321
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1322

    
1323
    breakpoint_invalidate(env, pc);
1324
    return 0;
1325
#else
1326
    return -1;
1327
#endif
1328
}
1329

    
1330
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1331
   CPU loop after each instruction */
1332
void cpu_single_step(CPUState *env, int enabled)
1333
{
1334
#if defined(TARGET_HAS_ICE)
1335
    if (env->singlestep_enabled != enabled) {
1336
        env->singlestep_enabled = enabled;
1337
        /* must flush all the translated code to avoid inconsistencies */
1338
        /* XXX: only flush what is necessary */
1339
        tb_flush(env);
1340
    }
1341
#endif
1342
}
1343

    
1344
/* enable or disable low level logging */
1345
void cpu_set_log(int log_flags)
1346
{
1347
    loglevel = log_flags;
1348
    if (loglevel && !logfile) {
1349
        logfile = fopen(logfilename, log_append ? "a" : "w");
1350
        if (!logfile) {
1351
            perror(logfilename);
1352
            _exit(1);
1353
        }
1354
#if !defined(CONFIG_SOFTMMU)
1355
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1356
        {
1357
            static uint8_t logfile_buf[4096];
1358
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1359
        }
1360
#else
1361
        setvbuf(logfile, NULL, _IOLBF, 0);
1362
#endif
1363
        log_append = 1;
1364
    }
1365
    if (!loglevel && logfile) {
1366
        fclose(logfile);
1367
        logfile = NULL;
1368
    }
1369
}
1370

    
1371
void cpu_set_log_filename(const char *filename)
1372
{
1373
    logfilename = strdup(filename);
1374
    if (logfile) {
1375
        fclose(logfile);
1376
        logfile = NULL;
1377
    }
1378
    cpu_set_log(loglevel);
1379
}
1380

    
1381
/* mask must never be zero, except for A20 change call */
1382
void cpu_interrupt(CPUState *env, int mask)
1383
{
1384
#if !defined(USE_NPTL)
1385
    TranslationBlock *tb;
1386
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1387
#endif
1388
    int old_mask;
1389

    
1390
    old_mask = env->interrupt_request;
1391
    /* FIXME: This is probably not threadsafe.  A different thread could
1392
       be in the middle of a read-modify-write operation.  */
1393
    env->interrupt_request |= mask;
1394
#if defined(USE_NPTL)
1395
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1396
       problem and hope the cpu will stop of its own accord.  For userspace
1397
       emulation this often isn't actually as bad as it sounds.  Often
1398
       signals are used primarily to interrupt blocking syscalls.  */
1399
#else
1400
    if (use_icount) {
1401
        env->icount_decr.u16.high = 0x8000;
1402
#ifndef CONFIG_USER_ONLY
1403
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1404
           an async event happened and we need to process it.  */
1405
        if (!can_do_io(env)
1406
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1407
            cpu_abort(env, "Raised interrupt while not in I/O function");
1408
        }
1409
#endif
1410
    } else {
1411
        tb = env->current_tb;
1412
        /* if the cpu is currently executing code, we must unlink it and
1413
           all the potentially executing TBs */
1414
        if (tb && !testandset(&interrupt_lock)) {
1415
            env->current_tb = NULL;
1416
            tb_reset_jump_recursive(tb);
1417
            resetlock(&interrupt_lock);
1418
        }
1419
    }
1420
#endif
1421
}
1422

    
1423
void cpu_reset_interrupt(CPUState *env, int mask)
1424
{
1425
    env->interrupt_request &= ~mask;
1426
}
1427

    
1428
CPULogItem cpu_log_items[] = {
1429
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1430
      "show generated host assembly code for each compiled TB" },
1431
    { CPU_LOG_TB_IN_ASM, "in_asm",
1432
      "show target assembly code for each compiled TB" },
1433
    { CPU_LOG_TB_OP, "op",
1434
      "show micro ops for each compiled TB" },
1435
    { CPU_LOG_TB_OP_OPT, "op_opt",
1436
      "show micro ops "
1437
#ifdef TARGET_I386
1438
      "before eflags optimization and "
1439
#endif
1440
      "after liveness analysis" },
1441
    { CPU_LOG_INT, "int",
1442
      "show interrupts/exceptions in short format" },
1443
    { CPU_LOG_EXEC, "exec",
1444
      "show trace before each executed TB (lots of logs)" },
1445
    { CPU_LOG_TB_CPU, "cpu",
1446
      "show CPU state before block translation" },
1447
#ifdef TARGET_I386
1448
    { CPU_LOG_PCALL, "pcall",
1449
      "show protected mode far calls/returns/exceptions" },
1450
#endif
1451
#ifdef DEBUG_IOPORT
1452
    { CPU_LOG_IOPORT, "ioport",
1453
      "show all i/o ports accesses" },
1454
#endif
1455
    { 0, NULL, NULL },
1456
};
1457

    
1458
static int cmp1(const char *s1, int n, const char *s2)
1459
{
1460
    if (strlen(s2) != n)
1461
        return 0;
1462
    return memcmp(s1, s2, n) == 0;
1463
}
1464

    
1465
/* takes a comma separated list of log masks. Returns 0 on error. */
1466
int cpu_str_to_log_mask(const char *str)
1467
{
1468
    CPULogItem *item;
1469
    int mask;
1470
    const char *p, *p1;
1471

    
1472
    p = str;
1473
    mask = 0;
1474
    for(;;) {
1475
        p1 = strchr(p, ',');
1476
        if (!p1)
1477
            p1 = p + strlen(p);
1478
        if (cmp1(p, p1 - p, "all")) {
479
            for(item = cpu_log_items; item->mask != 0; item++) {
480
                mask |= item->mask;
481
            }
482
        } else {
483
            for(item = cpu_log_items; item->mask != 0; item++) {
484
                if (cmp1(p, p1 - p, item->name))
485
                    goto found;
486
            }
487
            return 0;
488
        }
1489
    found:
1490
        mask |= item->mask;
1491
        if (*p1 != ',')
1492
            break;
1493
        p = p1 + 1;
1494
    }
1495
    return mask;
1496
}
1497

    
1498
void cpu_abort(CPUState *env, const char *fmt, ...)
1499
{
1500
    va_list ap;
1501
    va_list ap2;
1502

    
1503
    va_start(ap, fmt);
1504
    va_copy(ap2, ap);
1505
    fprintf(stderr, "qemu: fatal: ");
1506
    vfprintf(stderr, fmt, ap);
1507
    fprintf(stderr, "\n");
1508
#ifdef TARGET_I386
1509
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1510
#else
1511
    cpu_dump_state(env, stderr, fprintf, 0);
1512
#endif
1513
    if (logfile) {
1514
        fprintf(logfile, "qemu: fatal: ");
1515
        vfprintf(logfile, fmt, ap2);
1516
        fprintf(logfile, "\n");
1517
#ifdef TARGET_I386
1518
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1519
#else
1520
        cpu_dump_state(env, logfile, fprintf, 0);
1521
#endif
1522
        fflush(logfile);
1523
        fclose(logfile);
1524
    }
1525
    va_end(ap2);
1526
    va_end(ap);
1527
    abort();
1528
}
1529

    
1530
CPUState *cpu_copy(CPUState *env)
1531
{
1532
    CPUState *new_env = cpu_init(env->cpu_model_str);
1533
    /* preserve chaining and index */
1534
    CPUState *next_cpu = new_env->next_cpu;
1535
    int cpu_index = new_env->cpu_index;
1536
    memcpy(new_env, env, sizeof(CPUState));
1537
    new_env->next_cpu = next_cpu;
1538
    new_env->cpu_index = cpu_index;
1539
    return new_env;
1540
}
1541

    
1542
#if !defined(CONFIG_USER_ONLY)
1543

    
1544
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1545
{
1546
    unsigned int i;
1547

    
1548
    /* Discard jump cache entries for any tb which might potentially
1549
       overlap the flushed page.  */
1550
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1551
    memset (&env->tb_jmp_cache[i], 0, 
1552
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1553

    
1554
    i = tb_jmp_cache_hash_page(addr);
1555
    memset (&env->tb_jmp_cache[i], 0, 
1556
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1557
}
1558

    
1559
/* NOTE: if flush_global is true, also flush global entries (not
1560
   implemented yet) */
1561
void tlb_flush(CPUState *env, int flush_global)
1562
{
1563
    int i;
1564

    
1565
#if defined(DEBUG_TLB)
1566
    printf("tlb_flush:\n");
1567
#endif
1568
    /* must reset current TB so that interrupts cannot modify the
1569
       links while we are modifying them */
1570
    env->current_tb = NULL;
1571

    
1572
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1573
        env->tlb_table[0][i].addr_read = -1;
1574
        env->tlb_table[0][i].addr_write = -1;
1575
        env->tlb_table[0][i].addr_code = -1;
1576
        env->tlb_table[1][i].addr_read = -1;
1577
        env->tlb_table[1][i].addr_write = -1;
1578
        env->tlb_table[1][i].addr_code = -1;
1579
#if (NB_MMU_MODES >= 3)
1580
        env->tlb_table[2][i].addr_read = -1;
1581
        env->tlb_table[2][i].addr_write = -1;
1582
        env->tlb_table[2][i].addr_code = -1;
1583
#if (NB_MMU_MODES == 4)
1584
        env->tlb_table[3][i].addr_read = -1;
1585
        env->tlb_table[3][i].addr_write = -1;
1586
        env->tlb_table[3][i].addr_code = -1;
1587
#endif
1588
#endif
1589
    }
1590

    
1591
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1592

    
1593
#ifdef USE_KQEMU
1594
    if (env->kqemu_enabled) {
1595
        kqemu_flush(env, flush_global);
1596
    }
1597
#endif
1598
    tlb_flush_count++;
1599
}
1600

    
1601
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1602
{
1603
    if (addr == (tlb_entry->addr_read &
1604
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1605
        addr == (tlb_entry->addr_write &
1606
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1607
        addr == (tlb_entry->addr_code &
1608
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1609
        tlb_entry->addr_read = -1;
1610
        tlb_entry->addr_write = -1;
1611
        tlb_entry->addr_code = -1;
1612
    }
1613
}
1614

    
1615
void tlb_flush_page(CPUState *env, target_ulong addr)
1616
{
1617
    int i;
1618

    
1619
#if defined(DEBUG_TLB)
1620
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1621
#endif
1622
    /* must reset current TB so that interrupts cannot modify the
1623
       links while we are modifying them */
1624
    env->current_tb = NULL;
1625

    
1626
    addr &= TARGET_PAGE_MASK;
1627
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1628
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1629
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1630
#if (NB_MMU_MODES >= 3)
1631
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1632
#if (NB_MMU_MODES == 4)
1633
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1634
#endif
1635
#endif
1636

    
1637
    tlb_flush_jmp_cache(env, addr);
1638

    
1639
#ifdef USE_KQEMU
1640
    if (env->kqemu_enabled) {
1641
        kqemu_flush_page(env, addr);
1642
    }
1643
#endif
1644
}
1645

    
1646
/* update the TLBs so that writes to code in the virtual page 'addr'
1647
   can be detected */
1648
static void tlb_protect_code(ram_addr_t ram_addr)
1649
{
1650
    cpu_physical_memory_reset_dirty(ram_addr,
1651
                                    ram_addr + TARGET_PAGE_SIZE,
1652
                                    CODE_DIRTY_FLAG);
1653
}
1654

    
1655
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1656
   tested for self modifying code */
1657
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1658
                                    target_ulong vaddr)
1659
{
1660
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1661
}
1662

    
1663
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

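/* clear the given dirty bits for a physical memory range and update every
   CPU's TLB so that subsequent writes to the range are trapped again */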
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}

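/* re-arm the not-dirty write trap for a TLB entry whose RAM page has had
   its dirty bits cleared */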
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}

#else

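/* user-mode emulation: there is no softmmu TLB, so the flush and page
   mapping hooks are no-ops */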
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

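/* return the flags of the guest page containing 'address', or 0 if the
   page is not mapped */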
int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

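/* verify the guest access rights for the range [start, start + len),
   unprotecting write-protected pages that only hold translated code;
   returns 0 if the access is allowed and -1 otherwise */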
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
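/* compute the offsets (start_addr2, end_addr2) that the region
   [start_addr, start_addr + orig_size) covers within the page at 'addr',
   and set need_subpage if the region does not span the whole page */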
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

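/* freeing is not supported by this simple bump allocator */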
void qemu_ram_free(ram_addr_t addr)
{
}

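/* default handlers for accesses to unassigned physical memory; SPARC and
   CRIS targets report them through do_unassigned_access() */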
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

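/* write handlers for RAM pages whose CODE_DIRTY_FLAG is clear: invalidate
   any translated code on the page, perform the store, then update the dirty
   flags so later writes can take the fast path again */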
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
                && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

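/* dispatch a subpage access to the I/O handler registered for this offset
   within the page; 'len' selects the 8/16/32-bit handler */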
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

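/* route the subpage range [start, end] (offsets within the page) to the
   I/O handlers registered for 'memory' */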
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

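/* allocate and register a subpage descriptor for the page at 'base'; the
   whole page initially points at 'orig_memory' and *phys is updated to the
   newly allocated subpage I/O slot */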
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

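/* register the built-in I/O memory slots (ROM, unassigned, not-dirty RAM and
   watchpoint handling) and allocate the physical RAM dirty bitmap */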
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

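/* return the handler tables for a value previously returned by
   cpu_register_io_memory() */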
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

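/* system emulation: copy data between a physical address range and 'buf',
   going through the registered I/O handlers for MMIO pages and invalidating
   translated code when RAM is written */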
#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

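/* 64-bit variant of stl_phys_notdirty */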
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

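/* print statistics about the translation cache (TB sizes, jump chaining,
   flush counts) to the given stream */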
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

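/* instantiate the softmmu code-access load helpers (MMUSUFFIX _cmmu) for
   8, 16, 32 and 64-bit accesses */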
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif