/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__)
/* The prologue must be reachable with a direct jump. ARM has a
   limited branch range (possibly also PPC and SPARC?) so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
uint8_t *code_gen_buffer;
unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

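/* The page table is a two-level radix tree: the top L1_BITS of the
   virtual page number index l1_map[], and the low L2_BITS index the
   PageDesc array found there.  As an illustration (assuming a target
   with 4 KB pages, i.e. TARGET_PAGE_BITS == 12): a 32-bit target gives
   L1_BITS = 32 - 10 - 12 = 10, so 1024 L1 slots each covering 1024
   pages, which spans the full 2^20 page numbers. */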
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

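/* A subpage splits a single target page between several I/O handlers;
   SUBPAGE_IDX() extracts the offset within the page.  The trailing [4]
   arrays appear to be indexed by access-size shift (0/1/2 for 1/2/4
   byte accesses), and opaque[][2][] separates read (0) from write (1)
   callbacks (an interpretation from the accessors, not a guarantee). */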
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

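/* Make an existing allocation executable.  The POSIX variant rounds
   the range out to host page boundaries because mprotect() operates on
   whole pages; VirtualProtect() on Win32 does that rounding itself. */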
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

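/* Return the PageDesc for a given virtual page number, allocating the
   second-level array on first use.  'index' is addr >> TARGET_PAGE_BITS. */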
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;

#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE * TARGET_PAGE_SIZE))
        return NULL;
#endif
    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return NULL;
    return p + (index & (L2_SIZE - 1));
}

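/* Look up the PhysPageDesc for a physical page number.  The physical
   map is a single level for <= 32-bit physical addresses and gains an
   extra level above that; with alloc == 0 the function only probes and
   returns NULL for unmapped pages. */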
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
328
{
329
    void **lp, **p;
330
    PhysPageDesc *pd;
331

    
332
    p = (void **)l1_phys_map;
333
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
334

    
335
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
336
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
337
#endif
338
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
339
    p = *lp;
340
    if (!p) {
341
        /* allocate if not found */
342
        if (!alloc)
343
            return NULL;
344
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
345
        memset(p, 0, sizeof(void *) * L1_SIZE);
346
        *lp = p;
347
    }
348
#endif
349
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
350
    pd = *lp;
351
    if (!pd) {
352
        int i;
353
        /* allocate if not found */
354
        if (!alloc)
355
            return NULL;
356
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
357
        *lp = pd;
358
        for (i = 0; i < L2_SIZE; i++)
359
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
360
    }
361
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
362
}
363

    
364
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
365
{
366
    return phys_page_find_alloc(index, 0);
367
}
368

    
369
#if !defined(CONFIG_USER_ONLY)
370
static void tlb_protect_code(ram_addr_t ram_addr);
371
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
372
                                    target_ulong vaddr);
373
#define mmap_lock() do { } while(0)
374
#define mmap_unlock() do { } while(0)
375
#endif
376

    
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

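/* Register a new virtual CPU: append it to the global first_cpu list
   and assign it the next free cpu_index; in system emulation the
   common and per-target CPU state is also registered with savevm. */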
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->nb_watchpoints = 0;
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

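/* TB list pointers carry a tag in their low two bits: in the per-page
   lists the tag says which of the TB's (up to) two pages the link
   belongs to, and in the circular jmp_first list the value 2 marks the
   list head/terminator.  Masking with ~3 recovers the real pointer. */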
/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

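/* Set 'len' bits starting at bit index 'start' in the byte array
   'tab'.  Worked example: set_bits(tab, 6, 4) sets bits 6..9, i.e. the
   top two bits of tab[0] and the low two bits of tab[1]. */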
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

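/* Translate a block starting at (pc, cs_base, flags) and link it into
   the physical hash table and page lists.  If the TB pool or code
   buffer is exhausted, everything is flushed first, so the second
   tb_alloc() cannot fail. */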
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

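/* The code bitmap has one bit per byte of the page, so the check below
   extracts the 'len' bits covering [start, start + len) and takes the
   invalidate path only when one of those bytes lies inside a TB. */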
/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_io_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

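/* tbs[] entries are handed out in ascending tc_ptr order (the code
   buffer is a bump allocator), so a binary search finds the unique TB
   whose generated code contains tc_ptr. */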
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr)
            return 0;
    }
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
        return -1;

    i = env->nb_watchpoints++;
    env->watchpoint[i].vaddr = addr;
    env->watchpoint[i].type = type;
    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);
    return i;
}

/* Remove a watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
{
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        if (addr == env->watchpoint[i].vaddr) {
            env->nb_watchpoints--;
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
            tlb_flush_page(env, addr);
            return 0;
        }
    }
    return -1;
}

/* Remove all watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env) {
    int i;

    for (i = 0; i < env->nb_watchpoints; i++) {
        tlb_flush_page(env, env->watchpoint[i].vaddr);
    }
    env->nb_watchpoints = 0;
}

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove all breakpoints */
void cpu_breakpoint_remove_all(CPUState *env) {
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        breakpoint_invalidate(env, env->breakpoints[i]);
    }
    env->nb_breakpoints = 0;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

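/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP; "all" selects every entry in
   cpu_log_items, and 0 is returned for any unknown name. */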
/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

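/* Invalidate a single TLB entry, but only if it actually maps 'addr';
   the compare keeps TLB_INVALID_MASK in the mask so entries that are
   already invalid never match. */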
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

1692
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self-modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

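/* Clear the given dirty flag bits for the physical address range
   [start, end) and re-arm the write traps in every CPU's TLB so the
   bits are set again on the next store to that range.  */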
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}

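/* Re-arm the not-dirty write trap on a TLB entry whose underlying RAM
   page is no longer marked dirty.  */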
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

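/* Drop the not-dirty write trap from a TLB entry that matches 'vaddr',
   so that further stores to the page take the fast path.  */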
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr so that writes
   to it are no longer trapped for dirty tracking */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}

#else

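/* In user-mode emulation there is no soft TLB: the flush hooks are
   no-ops and page mapping always succeeds.  */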
void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

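/* check that the guest range [start, start + len) is mapped with at
   least the protection given in 'flags'. Returns 0 on success, -1
   otherwise; write checks also unprotect pages that were made
   read-only because they contain translated code */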
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

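/* The not-dirty handlers are used on the first write to a clean RAM
   page: they invalidate any translated code on the page before
   performing the store and marking the page dirty again.  */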
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
                && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

2443
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2444
                                 unsigned int len)
2445
{
2446
    uint32_t ret;
2447
    unsigned int idx;
2448

    
2449
    idx = SUBPAGE_IDX(addr - mmio->base);
2450
#if defined(DEBUG_SUBPAGE)
2451
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2452
           mmio, len, addr, idx);
2453
#endif
2454
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2455

    
2456
    return ret;
2457
}
2458

    
2459
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

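/* Install the I/O handlers of region 'memory' for the offsets
   [start, end] of the sub-divided page 'mmio'.  Returns 0 on success,
   -1 if the range is out of bounds.  */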
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

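/* Allocate a subpage descriptor for the page at 'base', register it as
   an I/O region, store the resulting descriptor in '*phys', and back
   the whole page with 'orig_memory' until smaller pieces are
   registered over it.  */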
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

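/* Register the fixed I/O regions (ROM, unassigned, not-dirty and
   watchpoint handlers) and allocate the dirty-page bitmap.  */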
static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

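/* Typical use, as a sketch only ('my_read', 'my_write' and 'my_state'
   are hypothetical handler tables and device state, not names from
   this file): a device registers its handlers, then maps the returned
   value into the guest physical address space:

       int io;
       io = cpu_register_io_memory(0, my_read, my_write, my_state);
       cpu_register_physical_memory(0x80000000, TARGET_PAGE_SIZE, io);
*/
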
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

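/* as stl_phys_notdirty, but for a 64 bit store */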
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

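/* Print translation buffer statistics (generated code size, TB counts
   and sizes, jump chaining and flush counters) through the given
   fprintf-like callback.  */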
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

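/* instantiate the code-access softmmu helpers for 1, 2, 4 and 8 byte
   accesses (SHIFT is the log2 of the access size) */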
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif