Statistics
| Branch: | Revision:

root / exec.c @ 7ba1e619

History | View | Annotate | Download (98 kB)

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#include "hw/hw.h"
41
#include "osdep.h"
42
#include "kvm.h"
43
#if defined(CONFIG_USER_ONLY)
44
#include <qemu.h>
45
#endif
46

    
47
//#define DEBUG_TB_INVALIDATE
48
//#define DEBUG_FLUSH
49
//#define DEBUG_TLB
50
//#define DEBUG_UNASSIGNED
51

    
52
/* make various TB consistency checks */
53
//#define DEBUG_TB_CHECK
54
//#define DEBUG_TLB_CHECK
55

    
56
//#define DEBUG_IOPORT
57
//#define DEBUG_SUBPAGE
58

    
59
#if !defined(CONFIG_USER_ONLY)
60
/* TB consistency checks only implemented for usermode emulation.  */
61
#undef DEBUG_TB_CHECK
62
#endif
63

    
64
#define SMC_BITMAP_USE_THRESHOLD 10
65

    
66
#define MMAP_AREA_START        0x00000000
67
#define MMAP_AREA_END          0xa8000000
68

    
69
#if defined(TARGET_SPARC64)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 41
71
#elif defined(TARGET_SPARC)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 36
73
#elif defined(TARGET_ALPHA)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#define TARGET_VIRT_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_PPC64)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 42
80
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81
#define TARGET_PHYS_ADDR_SPACE_BITS 36
82
#else
83
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84
#define TARGET_PHYS_ADDR_SPACE_BITS 32
85
#endif
86

    
87
static TranslationBlock *tbs;
88
int code_gen_max_blocks;
89
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90
static int nb_tbs;
91
/* any access to the tbs or the page table must use this lock */
92
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93

    
94
#if defined(__arm__) || defined(__sparc_v9__)
95
/* The prologue must be reachable with a direct jump. ARM and Sparc64
96
 have limited branch ranges (possibly also PPC) so place it in a
97
 section close to code segment. */
98
#define code_gen_section                                \
99
    __attribute__((__section__(".gen_code")))           \
100
    __attribute__((aligned (32)))
101
#else
102
#define code_gen_section                                \
103
    __attribute__((aligned (32)))
104
#endif
105

    
106
uint8_t code_gen_prologue[1024] code_gen_section;
107
static uint8_t *code_gen_buffer;
108
static unsigned long code_gen_buffer_size;
109
/* threshold to flush the translated code buffer */
110
static unsigned long code_gen_buffer_max_size;
111
uint8_t *code_gen_ptr;
112

    
113
#if !defined(CONFIG_USER_ONLY)
114
ram_addr_t phys_ram_size;
115
int phys_ram_fd;
116
uint8_t *phys_ram_base;
117
uint8_t *phys_ram_dirty;
118
static int in_migration;
119
static ram_addr_t phys_ram_alloc_offset = 0;
120
#endif
121

    
122
CPUState *first_cpu;
123
/* current CPU in the current thread. It is only valid inside
124
   cpu_exec() */
125
CPUState *cpu_single_env;
126
/* 0 = Do not count executed instructions.
127
   1 = Precise instruction counting.
128
   2 = Adaptive rate instruction counting.  */
129
int use_icount = 0;
130
/* Current instruction counter.  While executing translated code this may
131
   include some instructions that have not yet been executed.  */
132
int64_t qemu_icount;
133

    
134
typedef struct PageDesc {
135
    /* list of TBs intersecting this ram page */
136
    TranslationBlock *first_tb;
137
    /* in order to optimize self modifying code, we count the number
138
       of lookups we do to a given page to use a bitmap */
139
    unsigned int code_write_count;
140
    uint8_t *code_bitmap;
141
#if defined(CONFIG_USER_ONLY)
142
    unsigned long flags;
143
#endif
144
} PageDesc;
145

    
146
typedef struct PhysPageDesc {
147
    /* offset in host memory of the page + io_index in the low bits */
148
    ram_addr_t phys_offset;
149
} PhysPageDesc;
150

    
151
#define L2_BITS 10
152
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153
/* XXX: this is a temporary hack for alpha target.
154
 *      In the future, this is to be replaced by a multi-level table
155
 *      to actually be able to handle the complete 64 bits address space.
156
 */
157
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
158
#else
159
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
160
#endif
161

    
162
#define L1_SIZE (1 << L1_BITS)
163
#define L2_SIZE (1 << L2_BITS)
164

    
165
unsigned long qemu_real_host_page_size;
166
unsigned long qemu_host_page_bits;
167
unsigned long qemu_host_page_size;
168
unsigned long qemu_host_page_mask;
169

    
170
/* XXX: for system emulation, it could just be an array */
171
static PageDesc *l1_map[L1_SIZE];
172
static PhysPageDesc **l1_phys_map;
173

    
174
#if !defined(CONFIG_USER_ONLY)
175
static void io_mem_init(void);
176

    
177
/* io memory support */
178
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
180
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
181
static int io_mem_nb;
182
static int io_mem_watch;
183
#endif
184

    
185
/* log support */
186
static const char *logfilename = "/tmp/qemu.log";
187
FILE *logfile;
188
int loglevel;
189
static int log_append = 0;
190

    
191
/* statistics */
192
static int tlb_flush_count;
193
static int tb_flush_count;
194
static int tb_phys_invalidate_count;
195

    
196
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197
typedef struct subpage_t {
198
    target_phys_addr_t base;
199
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201
    void *opaque[TARGET_PAGE_SIZE][2][4];
202
} subpage_t;
203

    
204
#ifdef _WIN32
205
static void map_exec(void *addr, long size)
206
{
207
    DWORD old_protect;
208
    VirtualProtect(addr, size,
209
                   PAGE_EXECUTE_READWRITE, &old_protect);
210
    
211
}
212
#else
213
static void map_exec(void *addr, long size)
214
{
215
    unsigned long start, end, page_size;
216
    
217
    page_size = getpagesize();
218
    start = (unsigned long)addr;
219
    start &= ~(page_size - 1);
220
    
221
    end = (unsigned long)addr + size;
222
    end += page_size - 1;
223
    end &= ~(page_size - 1);
224
    
225
    mprotect((void *)start, end - start,
226
             PROT_READ | PROT_WRITE | PROT_EXEC);
227
}
228
#endif
229

    
230
static void page_init(void)
231
{
232
    /* NOTE: we can always suppose that qemu_host_page_size >=
233
       TARGET_PAGE_SIZE */
234
#ifdef _WIN32
235
    {
236
        SYSTEM_INFO system_info;
237
        DWORD old_protect;
238

    
239
        GetSystemInfo(&system_info);
240
        qemu_real_host_page_size = system_info.dwPageSize;
241
    }
242
#else
243
    qemu_real_host_page_size = getpagesize();
244
#endif
245
    if (qemu_host_page_size == 0)
246
        qemu_host_page_size = qemu_real_host_page_size;
247
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
248
        qemu_host_page_size = TARGET_PAGE_SIZE;
249
    qemu_host_page_bits = 0;
250
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
251
        qemu_host_page_bits++;
252
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
253
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
254
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
255

    
256
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
257
    {
258
        long long startaddr, endaddr;
259
        FILE *f;
260
        int n;
261

    
262
        mmap_lock();
263
        last_brk = (unsigned long)sbrk(0);
264
        f = fopen("/proc/self/maps", "r");
265
        if (f) {
266
            do {
267
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
268
                if (n == 2) {
269
                    startaddr = MIN(startaddr,
270
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
271
                    endaddr = MIN(endaddr,
272
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
273
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
274
                                   TARGET_PAGE_ALIGN(endaddr),
275
                                   PAGE_RESERVED); 
276
                }
277
            } while (!feof(f));
278
            fclose(f);
279
        }
280
        mmap_unlock();
281
    }
282
#endif
283
}
284

    
285
static inline PageDesc **page_l1_map(target_ulong index)
286
{
287
#if TARGET_LONG_BITS > 32
288
    /* Host memory outside guest VM.  For 32-bit targets we have already
289
       excluded high addresses.  */
290
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
291
        return NULL;
292
#endif
293
    return &l1_map[index >> L2_BITS];
294
}
295

    
296
static inline PageDesc *page_find_alloc(target_ulong index)
297
{
298
    PageDesc **lp, *p;
299
    lp = page_l1_map(index);
300
    if (!lp)
301
        return NULL;
302

    
303
    p = *lp;
304
    if (!p) {
305
        /* allocate if not found */
306
#if defined(CONFIG_USER_ONLY)
307
        unsigned long addr;
308
        size_t len = sizeof(PageDesc) * L2_SIZE;
309
        /* Don't use qemu_malloc because it may recurse.  */
310
        p = mmap(0, len, PROT_READ | PROT_WRITE,
311
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312
        *lp = p;
313
        addr = h2g(p);
314
        if (addr == (target_ulong)addr) {
315
            page_set_flags(addr & TARGET_PAGE_MASK,
316
                           TARGET_PAGE_ALIGN(addr + len),
317
                           PAGE_RESERVED); 
318
        }
319
#else
320
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321
        *lp = p;
322
#endif
323
    }
324
    return p + (index & (L2_SIZE - 1));
325
}
326

    
327
static inline PageDesc *page_find(target_ulong index)
328
{
329
    PageDesc **lp, *p;
330
    lp = page_l1_map(index);
331
    if (!lp)
332
        return NULL;
333

    
334
    p = *lp;
335
    if (!p)
336
        return 0;
337
    return p + (index & (L2_SIZE - 1));
338
}
339

    
340
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
341
{
342
    void **lp, **p;
343
    PhysPageDesc *pd;
344

    
345
    p = (void **)l1_phys_map;
346
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347

    
348
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350
#endif
351
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352
    p = *lp;
353
    if (!p) {
354
        /* allocate if not found */
355
        if (!alloc)
356
            return NULL;
357
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358
        memset(p, 0, sizeof(void *) * L1_SIZE);
359
        *lp = p;
360
    }
361
#endif
362
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363
    pd = *lp;
364
    if (!pd) {
365
        int i;
366
        /* allocate if not found */
367
        if (!alloc)
368
            return NULL;
369
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370
        *lp = pd;
371
        for (i = 0; i < L2_SIZE; i++)
372
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
373
    }
374
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
375
}
376

    
377
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
378
{
379
    return phys_page_find_alloc(index, 0);
380
}
381

    
382
#if !defined(CONFIG_USER_ONLY)
383
static void tlb_protect_code(ram_addr_t ram_addr);
384
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
385
                                    target_ulong vaddr);
386
#define mmap_lock() do { } while(0)
387
#define mmap_unlock() do { } while(0)
388
#endif
389

    
390
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391

    
392
#if defined(CONFIG_USER_ONLY)
393
/* Currently it is not recommanded to allocate big chunks of data in
394
   user mode. It will change when a dedicated libc will be used */
395
#define USE_STATIC_CODE_GEN_BUFFER
396
#endif
397

    
398
#ifdef USE_STATIC_CODE_GEN_BUFFER
399
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
400
#endif
401

    
402
static void code_gen_alloc(unsigned long tb_size)
403
{
404
#ifdef USE_STATIC_CODE_GEN_BUFFER
405
    code_gen_buffer = static_code_gen_buffer;
406
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
407
    map_exec(code_gen_buffer, code_gen_buffer_size);
408
#else
409
    code_gen_buffer_size = tb_size;
410
    if (code_gen_buffer_size == 0) {
411
#if defined(CONFIG_USER_ONLY)
412
        /* in user mode, phys_ram_size is not meaningful */
413
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
414
#else
415
        /* XXX: needs ajustments */
416
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
417
#endif
418
    }
419
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
420
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
421
    /* The code gen buffer location may have constraints depending on
422
       the host cpu and OS */
423
#if defined(__linux__) 
424
    {
425
        int flags;
426
        void *start = NULL;
427

    
428
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
429
#if defined(__x86_64__)
430
        flags |= MAP_32BIT;
431
        /* Cannot map more than that */
432
        if (code_gen_buffer_size > (800 * 1024 * 1024))
433
            code_gen_buffer_size = (800 * 1024 * 1024);
434
#elif defined(__sparc_v9__)
435
        // Map the buffer below 2G, so we can use direct calls and branches
436
        flags |= MAP_FIXED;
437
        start = (void *) 0x60000000UL;
438
        if (code_gen_buffer_size > (512 * 1024 * 1024))
439
            code_gen_buffer_size = (512 * 1024 * 1024);
440
#endif
441
        code_gen_buffer = mmap(start, code_gen_buffer_size,
442
                               PROT_WRITE | PROT_READ | PROT_EXEC,
443
                               flags, -1, 0);
444
        if (code_gen_buffer == MAP_FAILED) {
445
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
446
            exit(1);
447
        }
448
    }
449
#elif defined(__FreeBSD__)
450
    {
451
        int flags;
452
        void *addr = NULL;
453
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
454
#if defined(__x86_64__)
455
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
456
         * 0x40000000 is free */
457
        flags |= MAP_FIXED;
458
        addr = (void *)0x40000000;
459
        /* Cannot map more than that */
460
        if (code_gen_buffer_size > (800 * 1024 * 1024))
461
            code_gen_buffer_size = (800 * 1024 * 1024);
462
#endif
463
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
464
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
465
                               flags, -1, 0);
466
        if (code_gen_buffer == MAP_FAILED) {
467
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
468
            exit(1);
469
        }
470
    }
471
#else
472
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
473
    if (!code_gen_buffer) {
474
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
475
        exit(1);
476
    }
477
    map_exec(code_gen_buffer, code_gen_buffer_size);
478
#endif
479
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
480
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
481
    code_gen_buffer_max_size = code_gen_buffer_size - 
482
        code_gen_max_block_size();
483
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
484
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
485
}
486

    
487
/* Must be called before using the QEMU cpus. 'tb_size' is the size
488
   (in bytes) allocated to the translation buffer. Zero means default
489
   size. */
490
void cpu_exec_init_all(unsigned long tb_size)
491
{
492
    cpu_gen_init();
493
    code_gen_alloc(tb_size);
494
    code_gen_ptr = code_gen_buffer;
495
    page_init();
496
#if !defined(CONFIG_USER_ONLY)
497
    io_mem_init();
498
#endif
499
}
500

    
501
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
502

    
503
#define CPU_COMMON_SAVE_VERSION 1
504

    
505
static void cpu_common_save(QEMUFile *f, void *opaque)
506
{
507
    CPUState *env = opaque;
508

    
509
    qemu_put_be32s(f, &env->halted);
510
    qemu_put_be32s(f, &env->interrupt_request);
511
}
512

    
513
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
514
{
515
    CPUState *env = opaque;
516

    
517
    if (version_id != CPU_COMMON_SAVE_VERSION)
518
        return -EINVAL;
519

    
520
    qemu_get_be32s(f, &env->halted);
521
    qemu_get_be32s(f, &env->interrupt_request);
522
    tlb_flush(env, 1);
523

    
524
    return 0;
525
}
526
#endif
527

    
528
void cpu_exec_init(CPUState *env)
529
{
530
    CPUState **penv;
531
    int cpu_index;
532

    
533
    env->next_cpu = NULL;
534
    penv = &first_cpu;
535
    cpu_index = 0;
536
    while (*penv != NULL) {
537
        penv = (CPUState **)&(*penv)->next_cpu;
538
        cpu_index++;
539
    }
540
    env->cpu_index = cpu_index;
541
    env->nb_watchpoints = 0;
542
    *penv = env;
543
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
544
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
545
                    cpu_common_save, cpu_common_load, env);
546
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
547
                    cpu_save, cpu_load, env);
548
#endif
549
}
550

    
551
static inline void invalidate_page_bitmap(PageDesc *p)
552
{
553
    if (p->code_bitmap) {
554
        qemu_free(p->code_bitmap);
555
        p->code_bitmap = NULL;
556
    }
557
    p->code_write_count = 0;
558
}
559

    
560
/* set to NULL all the 'first_tb' fields in all PageDescs */
561
static void page_flush_tb(void)
562
{
563
    int i, j;
564
    PageDesc *p;
565

    
566
    for(i = 0; i < L1_SIZE; i++) {
567
        p = l1_map[i];
568
        if (p) {
569
            for(j = 0; j < L2_SIZE; j++) {
570
                p->first_tb = NULL;
571
                invalidate_page_bitmap(p);
572
                p++;
573
            }
574
        }
575
    }
576
}
577

    
578
/* flush all the translation blocks */
579
/* XXX: tb_flush is currently not thread safe */
580
void tb_flush(CPUState *env1)
581
{
582
    CPUState *env;
583
#if defined(DEBUG_FLUSH)
584
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
585
           (unsigned long)(code_gen_ptr - code_gen_buffer),
586
           nb_tbs, nb_tbs > 0 ?
587
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
588
#endif
589
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
590
        cpu_abort(env1, "Internal error: code buffer overflow\n");
591

    
592
    nb_tbs = 0;
593

    
594
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
595
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
596
    }
597

    
598
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
599
    page_flush_tb();
600

    
601
    code_gen_ptr = code_gen_buffer;
602
    /* XXX: flush processor icache at this point if cache flush is
603
       expensive */
604
    tb_flush_count++;
605
}
606

    
607
#ifdef DEBUG_TB_CHECK
608

    
609
static void tb_invalidate_check(target_ulong address)
610
{
611
    TranslationBlock *tb;
612
    int i;
613
    address &= TARGET_PAGE_MASK;
614
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
615
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
616
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
617
                  address >= tb->pc + tb->size)) {
618
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
619
                       address, (long)tb->pc, tb->size);
620
            }
621
        }
622
    }
623
}
624

    
625
/* verify that all the pages have correct rights for code */
626
static void tb_page_check(void)
627
{
628
    TranslationBlock *tb;
629
    int i, flags1, flags2;
630

    
631
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
632
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
633
            flags1 = page_get_flags(tb->pc);
634
            flags2 = page_get_flags(tb->pc + tb->size - 1);
635
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
636
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
637
                       (long)tb->pc, tb->size, flags1, flags2);
638
            }
639
        }
640
    }
641
}
642

    
643
static void tb_jmp_check(TranslationBlock *tb)
644
{
645
    TranslationBlock *tb1;
646
    unsigned int n1;
647

    
648
    /* suppress any remaining jumps to this TB */
649
    tb1 = tb->jmp_first;
650
    for(;;) {
651
        n1 = (long)tb1 & 3;
652
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
653
        if (n1 == 2)
654
            break;
655
        tb1 = tb1->jmp_next[n1];
656
    }
657
    /* check end of list */
658
    if (tb1 != tb) {
659
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
660
    }
661
}
662

    
663
#endif
664

    
665
/* invalidate one TB */
666
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
667
                             int next_offset)
668
{
669
    TranslationBlock *tb1;
670
    for(;;) {
671
        tb1 = *ptb;
672
        if (tb1 == tb) {
673
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
674
            break;
675
        }
676
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
677
    }
678
}
679

    
680
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
681
{
682
    TranslationBlock *tb1;
683
    unsigned int n1;
684

    
685
    for(;;) {
686
        tb1 = *ptb;
687
        n1 = (long)tb1 & 3;
688
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
689
        if (tb1 == tb) {
690
            *ptb = tb1->page_next[n1];
691
            break;
692
        }
693
        ptb = &tb1->page_next[n1];
694
    }
695
}
696

    
697
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
698
{
699
    TranslationBlock *tb1, **ptb;
700
    unsigned int n1;
701

    
702
    ptb = &tb->jmp_next[n];
703
    tb1 = *ptb;
704
    if (tb1) {
705
        /* find tb(n) in circular list */
706
        for(;;) {
707
            tb1 = *ptb;
708
            n1 = (long)tb1 & 3;
709
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
710
            if (n1 == n && tb1 == tb)
711
                break;
712
            if (n1 == 2) {
713
                ptb = &tb1->jmp_first;
714
            } else {
715
                ptb = &tb1->jmp_next[n1];
716
            }
717
        }
718
        /* now we can suppress tb(n) from the list */
719
        *ptb = tb->jmp_next[n];
720

    
721
        tb->jmp_next[n] = NULL;
722
    }
723
}
724

    
725
/* reset the jump entry 'n' of a TB so that it is not chained to
726
   another TB */
727
static inline void tb_reset_jump(TranslationBlock *tb, int n)
728
{
729
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
730
}
731

    
732
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
733
{
734
    CPUState *env;
735
    PageDesc *p;
736
    unsigned int h, n1;
737
    target_phys_addr_t phys_pc;
738
    TranslationBlock *tb1, *tb2;
739

    
740
    /* remove the TB from the hash list */
741
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
742
    h = tb_phys_hash_func(phys_pc);
743
    tb_remove(&tb_phys_hash[h], tb,
744
              offsetof(TranslationBlock, phys_hash_next));
745

    
746
    /* remove the TB from the page list */
747
    if (tb->page_addr[0] != page_addr) {
748
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
749
        tb_page_remove(&p->first_tb, tb);
750
        invalidate_page_bitmap(p);
751
    }
752
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
753
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
754
        tb_page_remove(&p->first_tb, tb);
755
        invalidate_page_bitmap(p);
756
    }
757

    
758
    tb_invalidated_flag = 1;
759

    
760
    /* remove the TB from the hash list */
761
    h = tb_jmp_cache_hash_func(tb->pc);
762
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
763
        if (env->tb_jmp_cache[h] == tb)
764
            env->tb_jmp_cache[h] = NULL;
765
    }
766

    
767
    /* suppress this TB from the two jump lists */
768
    tb_jmp_remove(tb, 0);
769
    tb_jmp_remove(tb, 1);
770

    
771
    /* suppress any remaining jumps to this TB */
772
    tb1 = tb->jmp_first;
773
    for(;;) {
774
        n1 = (long)tb1 & 3;
775
        if (n1 == 2)
776
            break;
777
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
778
        tb2 = tb1->jmp_next[n1];
779
        tb_reset_jump(tb1, n1);
780
        tb1->jmp_next[n1] = NULL;
781
        tb1 = tb2;
782
    }
783
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
784

    
785
    tb_phys_invalidate_count++;
786
}
787

    
788
static inline void set_bits(uint8_t *tab, int start, int len)
789
{
790
    int end, mask, end1;
791

    
792
    end = start + len;
793
    tab += start >> 3;
794
    mask = 0xff << (start & 7);
795
    if ((start & ~7) == (end & ~7)) {
796
        if (start < end) {
797
            mask &= ~(0xff << (end & 7));
798
            *tab |= mask;
799
        }
800
    } else {
801
        *tab++ |= mask;
802
        start = (start + 8) & ~7;
803
        end1 = end & ~7;
804
        while (start < end1) {
805
            *tab++ = 0xff;
806
            start += 8;
807
        }
808
        if (start < end) {
809
            mask = ~(0xff << (end & 7));
810
            *tab |= mask;
811
        }
812
    }
813
}
814

    
815
static void build_page_bitmap(PageDesc *p)
816
{
817
    int n, tb_start, tb_end;
818
    TranslationBlock *tb;
819

    
820
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
821
    if (!p->code_bitmap)
822
        return;
823

    
824
    tb = p->first_tb;
825
    while (tb != NULL) {
826
        n = (long)tb & 3;
827
        tb = (TranslationBlock *)((long)tb & ~3);
828
        /* NOTE: this is subtle as a TB may span two physical pages */
829
        if (n == 0) {
830
            /* NOTE: tb_end may be after the end of the page, but
831
               it is not a problem */
832
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
833
            tb_end = tb_start + tb->size;
834
            if (tb_end > TARGET_PAGE_SIZE)
835
                tb_end = TARGET_PAGE_SIZE;
836
        } else {
837
            tb_start = 0;
838
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
839
        }
840
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
841
        tb = tb->page_next[n];
842
    }
843
}
844

    
845
TranslationBlock *tb_gen_code(CPUState *env,
846
                              target_ulong pc, target_ulong cs_base,
847
                              int flags, int cflags)
848
{
849
    TranslationBlock *tb;
850
    uint8_t *tc_ptr;
851
    target_ulong phys_pc, phys_page2, virt_page2;
852
    int code_gen_size;
853

    
854
    phys_pc = get_phys_addr_code(env, pc);
855
    tb = tb_alloc(pc);
856
    if (!tb) {
857
        /* flush must be done */
858
        tb_flush(env);
859
        /* cannot fail at this point */
860
        tb = tb_alloc(pc);
861
        /* Don't forget to invalidate previous TB info.  */
862
        tb_invalidated_flag = 1;
863
    }
864
    tc_ptr = code_gen_ptr;
865
    tb->tc_ptr = tc_ptr;
866
    tb->cs_base = cs_base;
867
    tb->flags = flags;
868
    tb->cflags = cflags;
869
    cpu_gen_code(env, tb, &code_gen_size);
870
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
871

    
872
    /* check next page if needed */
873
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
874
    phys_page2 = -1;
875
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
876
        phys_page2 = get_phys_addr_code(env, virt_page2);
877
    }
878
    tb_link_phys(tb, phys_pc, phys_page2);
879
    return tb;
880
}
881

    
882
/* invalidate all TBs which intersect with the target physical page
883
   starting in range [start;end[. NOTE: start and end must refer to
884
   the same physical page. 'is_cpu_write_access' should be true if called
885
   from a real cpu write access: the virtual CPU will exit the current
886
   TB if code is modified inside this TB. */
887
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
888
                                   int is_cpu_write_access)
889
{
890
    int n, current_tb_modified, current_tb_not_found, current_flags;
891
    CPUState *env = cpu_single_env;
892
    PageDesc *p;
893
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
894
    target_ulong tb_start, tb_end;
895
    target_ulong current_pc, current_cs_base;
896

    
897
    p = page_find(start >> TARGET_PAGE_BITS);
898
    if (!p)
899
        return;
900
    if (!p->code_bitmap &&
901
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
902
        is_cpu_write_access) {
903
        /* build code bitmap */
904
        build_page_bitmap(p);
905
    }
906

    
907
    /* we remove all the TBs in the range [start, end[ */
908
    /* XXX: see if in some cases it could be faster to invalidate all the code */
909
    current_tb_not_found = is_cpu_write_access;
910
    current_tb_modified = 0;
911
    current_tb = NULL; /* avoid warning */
912
    current_pc = 0; /* avoid warning */
913
    current_cs_base = 0; /* avoid warning */
914
    current_flags = 0; /* avoid warning */
915
    tb = p->first_tb;
916
    while (tb != NULL) {
917
        n = (long)tb & 3;
918
        tb = (TranslationBlock *)((long)tb & ~3);
919
        tb_next = tb->page_next[n];
920
        /* NOTE: this is subtle as a TB may span two physical pages */
921
        if (n == 0) {
922
            /* NOTE: tb_end may be after the end of the page, but
923
               it is not a problem */
924
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
925
            tb_end = tb_start + tb->size;
926
        } else {
927
            tb_start = tb->page_addr[1];
928
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
929
        }
930
        if (!(tb_end <= start || tb_start >= end)) {
931
#ifdef TARGET_HAS_PRECISE_SMC
932
            if (current_tb_not_found) {
933
                current_tb_not_found = 0;
934
                current_tb = NULL;
935
                if (env->mem_io_pc) {
936
                    /* now we have a real cpu fault */
937
                    current_tb = tb_find_pc(env->mem_io_pc);
938
                }
939
            }
940
            if (current_tb == tb &&
941
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
942
                /* If we are modifying the current TB, we must stop
943
                its execution. We could be more precise by checking
944
                that the modification is after the current PC, but it
945
                would require a specialized function to partially
946
                restore the CPU state */
947

    
948
                current_tb_modified = 1;
949
                cpu_restore_state(current_tb, env,
950
                                  env->mem_io_pc, NULL);
951
#if defined(TARGET_I386)
952
                current_flags = env->hflags;
953
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
954
                current_cs_base = (target_ulong)env->segs[R_CS].base;
955
                current_pc = current_cs_base + env->eip;
956
#else
957
#error unsupported CPU
958
#endif
959
            }
960
#endif /* TARGET_HAS_PRECISE_SMC */
961
            /* we need to do that to handle the case where a signal
962
               occurs while doing tb_phys_invalidate() */
963
            saved_tb = NULL;
964
            if (env) {
965
                saved_tb = env->current_tb;
966
                env->current_tb = NULL;
967
            }
968
            tb_phys_invalidate(tb, -1);
969
            if (env) {
970
                env->current_tb = saved_tb;
971
                if (env->interrupt_request && env->current_tb)
972
                    cpu_interrupt(env, env->interrupt_request);
973
            }
974
        }
975
        tb = tb_next;
976
    }
977
#if !defined(CONFIG_USER_ONLY)
978
    /* if no code remaining, no need to continue to use slow writes */
979
    if (!p->first_tb) {
980
        invalidate_page_bitmap(p);
981
        if (is_cpu_write_access) {
982
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
983
        }
984
    }
985
#endif
986
#ifdef TARGET_HAS_PRECISE_SMC
987
    if (current_tb_modified) {
988
        /* we generate a block containing just the instruction
989
           modifying the memory. It will ensure that it cannot modify
990
           itself */
991
        env->current_tb = NULL;
992
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
993
        cpu_resume_from_signal(env, NULL);
994
    }
995
#endif
996
}
997

    
998
/* len must be <= 8 and start must be a multiple of len */
999
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1000
{
1001
    PageDesc *p;
1002
    int offset, b;
1003
#if 0
1004
    if (1) {
1005
        if (loglevel) {
1006
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1007
                   cpu_single_env->mem_io_vaddr, len,
1008
                   cpu_single_env->eip,
1009
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1010
        }
1011
    }
1012
#endif
1013
    p = page_find(start >> TARGET_PAGE_BITS);
1014
    if (!p)
1015
        return;
1016
    if (p->code_bitmap) {
1017
        offset = start & ~TARGET_PAGE_MASK;
1018
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1019
        if (b & ((1 << len) - 1))
1020
            goto do_invalidate;
1021
    } else {
1022
    do_invalidate:
1023
        tb_invalidate_phys_page_range(start, start + len, 1);
1024
    }
1025
}
1026

    
1027
#if !defined(CONFIG_SOFTMMU)
1028
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1029
                                    unsigned long pc, void *puc)
1030
{
1031
    int n, current_flags, current_tb_modified;
1032
    target_ulong current_pc, current_cs_base;
1033
    PageDesc *p;
1034
    TranslationBlock *tb, *current_tb;
1035
#ifdef TARGET_HAS_PRECISE_SMC
1036
    CPUState *env = cpu_single_env;
1037
#endif
1038

    
1039
    addr &= TARGET_PAGE_MASK;
1040
    p = page_find(addr >> TARGET_PAGE_BITS);
1041
    if (!p)
1042
        return;
1043
    tb = p->first_tb;
1044
    current_tb_modified = 0;
1045
    current_tb = NULL;
1046
    current_pc = 0; /* avoid warning */
1047
    current_cs_base = 0; /* avoid warning */
1048
    current_flags = 0; /* avoid warning */
1049
#ifdef TARGET_HAS_PRECISE_SMC
1050
    if (tb && pc != 0) {
1051
        current_tb = tb_find_pc(pc);
1052
    }
1053
#endif
1054
    while (tb != NULL) {
1055
        n = (long)tb & 3;
1056
        tb = (TranslationBlock *)((long)tb & ~3);
1057
#ifdef TARGET_HAS_PRECISE_SMC
1058
        if (current_tb == tb &&
1059
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060
                /* If we are modifying the current TB, we must stop
1061
                   its execution. We could be more precise by checking
1062
                   that the modification is after the current PC, but it
1063
                   would require a specialized function to partially
1064
                   restore the CPU state */
1065

    
1066
            current_tb_modified = 1;
1067
            cpu_restore_state(current_tb, env, pc, puc);
1068
#if defined(TARGET_I386)
1069
            current_flags = env->hflags;
1070
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1071
            current_cs_base = (target_ulong)env->segs[R_CS].base;
1072
            current_pc = current_cs_base + env->eip;
1073
#else
1074
#error unsupported CPU
1075
#endif
1076
        }
1077
#endif /* TARGET_HAS_PRECISE_SMC */
1078
        tb_phys_invalidate(tb, addr);
1079
        tb = tb->page_next[n];
1080
    }
1081
    p->first_tb = NULL;
1082
#ifdef TARGET_HAS_PRECISE_SMC
1083
    if (current_tb_modified) {
1084
        /* we generate a block containing just the instruction
1085
           modifying the memory. It will ensure that it cannot modify
1086
           itself */
1087
        env->current_tb = NULL;
1088
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1089
        cpu_resume_from_signal(env, puc);
1090
    }
1091
#endif
1092
}
1093
#endif
1094

    
1095
/* add the tb in the target page and protect it if necessary */
1096
static inline void tb_alloc_page(TranslationBlock *tb,
1097
                                 unsigned int n, target_ulong page_addr)
1098
{
1099
    PageDesc *p;
1100
    TranslationBlock *last_first_tb;
1101

    
1102
    tb->page_addr[n] = page_addr;
1103
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1104
    tb->page_next[n] = p->first_tb;
1105
    last_first_tb = p->first_tb;
1106
    p->first_tb = (TranslationBlock *)((long)tb | n);
1107
    invalidate_page_bitmap(p);
1108

    
1109
#if defined(TARGET_HAS_SMC) || 1
1110

    
1111
#if defined(CONFIG_USER_ONLY)
1112
    if (p->flags & PAGE_WRITE) {
1113
        target_ulong addr;
1114
        PageDesc *p2;
1115
        int prot;
1116

    
1117
        /* force the host page as non writable (writes will have a
1118
           page fault + mprotect overhead) */
1119
        page_addr &= qemu_host_page_mask;
1120
        prot = 0;
1121
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1122
            addr += TARGET_PAGE_SIZE) {
1123

    
1124
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1125
            if (!p2)
1126
                continue;
1127
            prot |= p2->flags;
1128
            p2->flags &= ~PAGE_WRITE;
1129
            page_get_flags(addr);
1130
          }
1131
        mprotect(g2h(page_addr), qemu_host_page_size,
1132
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1133
#ifdef DEBUG_TB_INVALIDATE
1134
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1135
               page_addr);
1136
#endif
1137
    }
1138
#else
1139
    /* if some code is already present, then the pages are already
1140
       protected. So we handle the case where only the first TB is
1141
       allocated in a physical page */
1142
    if (!last_first_tb) {
1143
        tlb_protect_code(page_addr);
1144
    }
1145
#endif
1146

    
1147
#endif /* TARGET_HAS_SMC */
1148
}
1149

    
1150
/* Allocate a new translation block. Flush the translation buffer if
1151
   too many translation blocks or too much generated code. */
1152
TranslationBlock *tb_alloc(target_ulong pc)
1153
{
1154
    TranslationBlock *tb;
1155

    
1156
    if (nb_tbs >= code_gen_max_blocks ||
1157
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1158
        return NULL;
1159
    tb = &tbs[nb_tbs++];
1160
    tb->pc = pc;
1161
    tb->cflags = 0;
1162
    return tb;
1163
}
1164

    
1165
void tb_free(TranslationBlock *tb)
1166
{
1167
    /* In practice this is mostly used for single use temporary TB
1168
       Ignore the hard cases and just back up if this TB happens to
1169
       be the last one generated.  */
1170
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1171
        code_gen_ptr = tb->tc_ptr;
1172
        nb_tbs--;
1173
    }
1174
}
1175

    
1176
/* add a new TB and link it to the physical page tables. phys_page2 is
1177
   (-1) to indicate that only one page contains the TB. */
1178
void tb_link_phys(TranslationBlock *tb,
1179
                  target_ulong phys_pc, target_ulong phys_page2)
1180
{
1181
    unsigned int h;
1182
    TranslationBlock **ptb;
1183

    
1184
    /* Grab the mmap lock to stop another thread invalidating this TB
1185
       before we are done.  */
1186
    mmap_lock();
1187
    /* add in the physical hash table */
1188
    h = tb_phys_hash_func(phys_pc);
1189
    ptb = &tb_phys_hash[h];
1190
    tb->phys_hash_next = *ptb;
1191
    *ptb = tb;
1192

    
1193
    /* add in the page list */
1194
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1195
    if (phys_page2 != -1)
1196
        tb_alloc_page(tb, 1, phys_page2);
1197
    else
1198
        tb->page_addr[1] = -1;
1199

    
1200
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1201
    tb->jmp_next[0] = NULL;
1202
    tb->jmp_next[1] = NULL;
1203

    
1204
    /* init original jump addresses */
1205
    if (tb->tb_next_offset[0] != 0xffff)
1206
        tb_reset_jump(tb, 0);
1207
    if (tb->tb_next_offset[1] != 0xffff)
1208
        tb_reset_jump(tb, 1);
1209

    
1210
#ifdef DEBUG_TB_CHECK
1211
    tb_page_check();
1212
#endif
1213
    mmap_unlock();
1214
}
1215

    
1216
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1217
   tb[1].tc_ptr. Return NULL if not found */
1218
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1219
{
1220
    int m_min, m_max, m;
1221
    unsigned long v;
1222
    TranslationBlock *tb;
1223

    
1224
    if (nb_tbs <= 0)
1225
        return NULL;
1226
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1227
        tc_ptr >= (unsigned long)code_gen_ptr)
1228
        return NULL;
1229
    /* binary search (cf Knuth) */
1230
    m_min = 0;
1231
    m_max = nb_tbs - 1;
1232
    while (m_min <= m_max) {
1233
        m = (m_min + m_max) >> 1;
1234
        tb = &tbs[m];
1235
        v = (unsigned long)tb->tc_ptr;
1236
        if (v == tc_ptr)
1237
            return tb;
1238
        else if (tc_ptr < v) {
1239
            m_max = m - 1;
1240
        } else {
1241
            m_min = m + 1;
1242
        }
1243
    }
1244
    return &tbs[m_max];
1245
}
1246

    
1247
static void tb_reset_jump_recursive(TranslationBlock *tb);
1248

    
1249
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1250
{
1251
    TranslationBlock *tb1, *tb_next, **ptb;
1252
    unsigned int n1;
1253

    
1254
    tb1 = tb->jmp_next[n];
1255
    if (tb1 != NULL) {
1256
        /* find head of list */
1257
        for(;;) {
1258
            n1 = (long)tb1 & 3;
1259
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1260
            if (n1 == 2)
1261
                break;
1262
            tb1 = tb1->jmp_next[n1];
1263
        }
1264
        /* we are now sure now that tb jumps to tb1 */
1265
        tb_next = tb1;
1266

    
1267
        /* remove tb from the jmp_first list */
1268
        ptb = &tb_next->jmp_first;
1269
        for(;;) {
1270
            tb1 = *ptb;
1271
            n1 = (long)tb1 & 3;
1272
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1273
            if (n1 == n && tb1 == tb)
1274
                break;
1275
            ptb = &tb1->jmp_next[n1];
1276
        }
1277
        *ptb = tb->jmp_next[n];
1278
        tb->jmp_next[n] = NULL;
1279

    
1280
        /* suppress the jump to next tb in generated code */
1281
        tb_reset_jump(tb, n);
1282

    
1283
        /* suppress jumps in the tb on which we could have jumped */
1284
        tb_reset_jump_recursive(tb_next);
1285
    }
1286
}
1287

    
1288
static void tb_reset_jump_recursive(TranslationBlock *tb)
1289
{
1290
    tb_reset_jump_recursive2(tb, 0);
1291
    tb_reset_jump_recursive2(tb, 1);
1292
}
1293

    
1294
#if defined(TARGET_HAS_ICE)
1295
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1296
{
1297
    target_phys_addr_t addr;
1298
    target_ulong pd;
1299
    ram_addr_t ram_addr;
1300
    PhysPageDesc *p;
1301

    
1302
    addr = cpu_get_phys_page_debug(env, pc);
1303
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1304
    if (!p) {
1305
        pd = IO_MEM_UNASSIGNED;
1306
    } else {
1307
        pd = p->phys_offset;
1308
    }
1309
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1310
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1311
}
1312
#endif
1313

    
1314
/* Add a watchpoint.  */
1315
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1316
{
1317
    int i;
1318

    
1319
    for (i = 0; i < env->nb_watchpoints; i++) {
1320
        if (addr == env->watchpoint[i].vaddr)
1321
            return 0;
1322
    }
1323
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1324
        return -1;
1325

    
1326
    i = env->nb_watchpoints++;
1327
    env->watchpoint[i].vaddr = addr;
1328
    env->watchpoint[i].type = type;
1329
    tlb_flush_page(env, addr);
1330
    /* FIXME: This flush is needed because of the hack to make memory ops
1331
       terminate the TB.  It can be removed once the proper IO trap and
1332
       re-execute bits are in.  */
1333
    tb_flush(env);
1334
    return i;
1335
}
1336

    
1337
/* Remove a watchpoint.  */
1338
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1339
{
1340
    int i;
1341

    
1342
    for (i = 0; i < env->nb_watchpoints; i++) {
1343
        if (addr == env->watchpoint[i].vaddr) {
1344
            env->nb_watchpoints--;
1345
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1346
            tlb_flush_page(env, addr);
1347
            return 0;
1348
        }
1349
    }
1350
    return -1;
1351
}
1352

    
1353
/* Remove all watchpoints. */
1354
void cpu_watchpoint_remove_all(CPUState *env) {
1355
    int i;
1356

    
1357
    for (i = 0; i < env->nb_watchpoints; i++) {
1358
        tlb_flush_page(env, env->watchpoint[i].vaddr);
1359
    }
1360
    env->nb_watchpoints = 0;
1361
}
1362

    
1363
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1364
   breakpoint is reached */
1365
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1366
{
1367
#if defined(TARGET_HAS_ICE)
1368
    int i;
1369

    
1370
    for(i = 0; i < env->nb_breakpoints; i++) {
1371
        if (env->breakpoints[i] == pc)
1372
            return 0;
1373
    }
1374

    
1375
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1376
        return -1;
1377
    env->breakpoints[env->nb_breakpoints++] = pc;
1378

    
1379
    breakpoint_invalidate(env, pc);
1380
    return 0;
1381
#else
1382
    return -1;
1383
#endif
1384
}
1385

    
1386
/* remove all breakpoints */
1387
void cpu_breakpoint_remove_all(CPUState *env) {
1388
#if defined(TARGET_HAS_ICE)
1389
    int i;
1390
    for(i = 0; i < env->nb_breakpoints; i++) {
1391
        breakpoint_invalidate(env, env->breakpoints[i]);
1392
    }
1393
    env->nb_breakpoints = 0;
1394
#endif
1395
}
1396

    
1397
/* remove a breakpoint */
1398
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1399
{
1400
#if defined(TARGET_HAS_ICE)
1401
    int i;
1402
    for(i = 0; i < env->nb_breakpoints; i++) {
1403
        if (env->breakpoints[i] == pc)
1404
            goto found;
1405
    }
1406
    return -1;
1407
 found:
1408
    env->nb_breakpoints--;
1409
    if (i < env->nb_breakpoints)
1410
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1411

    
1412
    breakpoint_invalidate(env, pc);
1413
    return 0;
1414
#else
1415
    return -1;
1416
#endif
1417
}
1418

    
1419
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1420
   CPU loop after each instruction */
1421
void cpu_single_step(CPUState *env, int enabled)
1422
{
1423
#if defined(TARGET_HAS_ICE)
1424
    if (env->singlestep_enabled != enabled) {
1425
        env->singlestep_enabled = enabled;
1426
        /* must flush all the translated code to avoid inconsistancies */
1427
        /* XXX: only flush what is necessary */
1428
        tb_flush(env);
1429
    }
1430
#endif
1431
}
1432

    
1433
/* enable or disable low levels log */
1434
void cpu_set_log(int log_flags)
1435
{
1436
    loglevel = log_flags;
1437
    if (loglevel && !logfile) {
1438
        logfile = fopen(logfilename, log_append ? "a" : "w");
1439
        if (!logfile) {
1440
            perror(logfilename);
1441
            _exit(1);
1442
        }
1443
#if !defined(CONFIG_SOFTMMU)
1444
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1445
        {
1446
            static char logfile_buf[4096];
1447
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1448
        }
1449
#else
1450
        setvbuf(logfile, NULL, _IOLBF, 0);
1451
#endif
1452
        log_append = 1;
1453
    }
1454
    if (!loglevel && logfile) {
1455
        fclose(logfile);
1456
        logfile = NULL;
1457
    }
1458
}
1459

    
1460
void cpu_set_log_filename(const char *filename)
1461
{
1462
    logfilename = strdup(filename);
1463
    if (logfile) {
1464
        fclose(logfile);
1465
        logfile = NULL;
1466
    }
1467
    cpu_set_log(loglevel);
1468
}
1469

    
1470
/* mask must never be zero, except for A20 change call */
1471
void cpu_interrupt(CPUState *env, int mask)
1472
{
1473
#if !defined(USE_NPTL)
1474
    TranslationBlock *tb;
1475
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1476
#endif
1477
    int old_mask;
1478

    
1479
    old_mask = env->interrupt_request;
1480
    /* FIXME: This is probably not threadsafe.  A different thread could
1481
       be in the middle of a read-modify-write operation.  */
1482
    env->interrupt_request |= mask;
1483
#if defined(USE_NPTL)
1484
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1485
       problem and hope the cpu will stop of its own accord.  For userspace
1486
       emulation this often isn't actually as bad as it sounds.  Often
1487
       signals are used primarily to interrupt blocking syscalls.  */
1488
#else
1489
    if (use_icount) {
1490
        env->icount_decr.u16.high = 0xffff;
1491
#ifndef CONFIG_USER_ONLY
1492
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1493
           an async event happened and we need to process it.  */
1494
        if (!can_do_io(env)
1495
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1496
            cpu_abort(env, "Raised interrupt while not in I/O function");
1497
        }
1498
#endif
1499
    } else {
1500
        tb = env->current_tb;
1501
        /* if the cpu is currently executing code, we must unlink it and
1502
           all the potentially executing TB */
1503
        if (tb && !testandset(&interrupt_lock)) {
1504
            env->current_tb = NULL;
1505
            tb_reset_jump_recursive(tb);
1506
            resetlock(&interrupt_lock);
1507
        }
1508
    }
1509
#endif
1510
}
1511

    
1512
void cpu_reset_interrupt(CPUState *env, int mask)
1513
{
1514
    env->interrupt_request &= ~mask;
1515
}
1516

    
1517
const CPULogItem cpu_log_items[] = {
1518
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1519
      "show generated host assembly code for each compiled TB" },
1520
    { CPU_LOG_TB_IN_ASM, "in_asm",
1521
      "show target assembly code for each compiled TB" },
1522
    { CPU_LOG_TB_OP, "op",
1523
      "show micro ops for each compiled TB" },
1524
    { CPU_LOG_TB_OP_OPT, "op_opt",
1525
      "show micro ops "
1526
#ifdef TARGET_I386
1527
      "before eflags optimization and "
1528
#endif
1529
      "after liveness analysis" },
1530
    { CPU_LOG_INT, "int",
1531
      "show interrupts/exceptions in short format" },
1532
    { CPU_LOG_EXEC, "exec",
1533
      "show trace before each executed TB (lots of logs)" },
1534
    { CPU_LOG_TB_CPU, "cpu",
1535
      "show CPU state before block translation" },
1536
#ifdef TARGET_I386
1537
    { CPU_LOG_PCALL, "pcall",
1538
      "show protected mode far calls/returns/exceptions" },
1539
#endif
1540
#ifdef DEBUG_IOPORT
1541
    { CPU_LOG_IOPORT, "ioport",
1542
      "show all i/o ports accesses" },
1543
#endif
1544
    { 0, NULL, NULL },
1545
};
1546

    
1547
static int cmp1(const char *s1, int n, const char *s2)
1548
{
1549
    if (strlen(s2) != n)
1550
        return 0;
1551
    return memcmp(s1, s2, n) == 0;
1552
}
1553

    
1554
/* takes a comma separated list of log masks. Return 0 if error. */
1555
int cpu_str_to_log_mask(const char *str)
1556
{
1557
    const CPULogItem *item;
1558
    int mask;
1559
    const char *p, *p1;
1560

    
1561
    p = str;
1562
    mask = 0;
1563
    for(;;) {
1564
        p1 = strchr(p, ',');
1565
        if (!p1)
1566
            p1 = p + strlen(p);
1567
        if(cmp1(p,p1-p,"all")) {
1568
                for(item = cpu_log_items; item->mask != 0; item++) {
1569
                        mask |= item->mask;
1570
                }
1571
        } else {
1572
        for(item = cpu_log_items; item->mask != 0; item++) {
1573
            if (cmp1(p, p1 - p, item->name))
1574
                goto found;
1575
        }
1576
        return 0;
1577
        }
1578
    found:
1579
        mask |= item->mask;
1580
        if (*p1 != ',')
1581
            break;
1582
        p = p1 + 1;
1583
    }
1584
    return mask;
1585
}
1586

    
1587
void cpu_abort(CPUState *env, const char *fmt, ...)
1588
{
1589
    va_list ap;
1590
    va_list ap2;
1591

    
1592
    va_start(ap, fmt);
1593
    va_copy(ap2, ap);
1594
    fprintf(stderr, "qemu: fatal: ");
1595
    vfprintf(stderr, fmt, ap);
1596
    fprintf(stderr, "\n");
1597
#ifdef TARGET_I386
1598
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1599
#else
1600
    cpu_dump_state(env, stderr, fprintf, 0);
1601
#endif
1602
    if (logfile) {
1603
        fprintf(logfile, "qemu: fatal: ");
1604
        vfprintf(logfile, fmt, ap2);
1605
        fprintf(logfile, "\n");
1606
#ifdef TARGET_I386
1607
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1608
#else
1609
        cpu_dump_state(env, logfile, fprintf, 0);
1610
#endif
1611
        fflush(logfile);
1612
        fclose(logfile);
1613
    }
1614
    va_end(ap2);
1615
    va_end(ap);
1616
    abort();
1617
}
1618

    
1619
CPUState *cpu_copy(CPUState *env)
1620
{
1621
    CPUState *new_env = cpu_init(env->cpu_model_str);
1622
    /* preserve chaining and index */
1623
    CPUState *next_cpu = new_env->next_cpu;
1624
    int cpu_index = new_env->cpu_index;
1625
    memcpy(new_env, env, sizeof(CPUState));
1626
    new_env->next_cpu = next_cpu;
1627
    new_env->cpu_index = cpu_index;
1628
    return new_env;
1629
}
1630

    
1631
#if !defined(CONFIG_USER_ONLY)
1632

    
1633
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1634
{
1635
    unsigned int i;
1636

    
1637
    /* Discard jump cache entries for any tb which might potentially
1638
       overlap the flushed page.  */
1639
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1640
    memset (&env->tb_jmp_cache[i], 0, 
1641
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1642

    
1643
    i = tb_jmp_cache_hash_page(addr);
1644
    memset (&env->tb_jmp_cache[i], 0, 
1645
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1646
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}
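
/* The -1 written above acts as an invalid marker: a page-aligned guest
   address can never match -1 under the (TARGET_PAGE_MASK | TLB_INVALID_MASK)
   comparison used by tlb_flush_entry() and by the softmmu fast path, so the
   next access to any page is forced to refill its TLB entry. */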

    
1690
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1691
{
1692
    if (addr == (tlb_entry->addr_read &
1693
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1694
        addr == (tlb_entry->addr_write &
1695
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1696
        addr == (tlb_entry->addr_code &
1697
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1698
        tlb_entry->addr_read = -1;
1699
        tlb_entry->addr_write = -1;
1700
        tlb_entry->addr_code = -1;
1701
    }
1702
}
1703

    
1704
void tlb_flush_page(CPUState *env, target_ulong addr)
1705
{
1706
    int i;
1707

    
1708
#if defined(DEBUG_TLB)
1709
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1710
#endif
1711
    /* must reset current TB so that interrupts cannot modify the
1712
       links while we are modifying them */
1713
    env->current_tb = NULL;
1714

    
1715
    addr &= TARGET_PAGE_MASK;
1716
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1717
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1718
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1719
#if (NB_MMU_MODES >= 3)
1720
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1721
#if (NB_MMU_MODES == 4)
1722
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1723
#endif
1724
#endif
1725

    
1726
    tlb_flush_jmp_cache(env, addr);
1727

    
1728
#ifdef USE_KQEMU
1729
    if (env->kqemu_enabled) {
1730
        kqemu_flush_page(env, addr);
1731
    }
1732
#endif
1733
}
1734

    
1735
/* update the TLBs so that writes to code in the virtual page 'addr'
1736
   can be detected */
1737
static void tlb_protect_code(ram_addr_t ram_addr)
1738
{
1739
    cpu_physical_memory_reset_dirty(ram_addr,
1740
                                    ram_addr + TARGET_PAGE_SIZE,
1741
                                    CODE_DIRTY_FLAG);
1742
}
1743

    
1744
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1745
   tested for self modifying code */
1746
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1747
                                    target_ulong vaddr)
1748
{
1749
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1750
}
1751

    
1752
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1753
                                         unsigned long start, unsigned long length)
1754
{
1755
    unsigned long addr;
1756
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1757
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1758
        if ((addr - start) < length) {
1759
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1760
        }
1761
    }
1762
}
1763

    
1764
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1765
                                     int dirty_flags)
1766
{
1767
    CPUState *env;
1768
    unsigned long length, start1;
1769
    int i, mask, len;
1770
    uint8_t *p;
1771

    
1772
    start &= TARGET_PAGE_MASK;
1773
    end = TARGET_PAGE_ALIGN(end);
1774

    
1775
    length = end - start;
1776
    if (length == 0)
1777
        return;
1778
    len = length >> TARGET_PAGE_BITS;
1779
#ifdef USE_KQEMU
1780
    /* XXX: should not depend on cpu context */
1781
    env = first_cpu;
1782
    if (env->kqemu_enabled) {
1783
        ram_addr_t addr;
1784
        addr = start;
1785
        for(i = 0; i < len; i++) {
1786
            kqemu_set_notdirty(env, addr);
1787
            addr += TARGET_PAGE_SIZE;
1788
        }
1789
    }
1790
#endif
1791
    mask = ~dirty_flags;
1792
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1793
    for(i = 0; i < len; i++)
1794
        p[i] &= mask;
1795

    
1796
    /* we modify the TLB cache so that the dirty bit will be set again
1797
       when accessing the range */
1798
    start1 = start + (unsigned long)phys_ram_base;
1799
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1800
        for(i = 0; i < CPU_TLB_SIZE; i++)
1801
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1802
        for(i = 0; i < CPU_TLB_SIZE; i++)
1803
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1804
#if (NB_MMU_MODES >= 3)
1805
        for(i = 0; i < CPU_TLB_SIZE; i++)
1806
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1807
#if (NB_MMU_MODES == 4)
1808
        for(i = 0; i < CPU_TLB_SIZE; i++)
1809
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1810
#endif
1811
#endif
1812
    }
1813
}
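
/* Summary of the function above: it clears the requested dirty_flags bits
   for every page in [start, end) and then, via tlb_reset_dirty_range(),
   re-tags the cached addr_write TLB entries of every CPU with TLB_NOTDIRTY,
   so the next write to the range takes the slow path and can set the dirty
   bits again. */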
1814

    
1815
int cpu_physical_memory_set_dirty_tracking(int enable)
1816
{
1817
    in_migration = enable;
1818
    return 0;
1819
}
1820

    
1821
int cpu_physical_memory_get_dirty_tracking(void)
1822
{
1823
    return in_migration;
1824
}
1825

    
1826
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1827
{
1828
    ram_addr_t ram_addr;
1829

    
1830
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1832
            tlb_entry->addend - (unsigned long)phys_ram_base;
1833
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1834
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1835
        }
1836
    }
1837
}
1838

    
1839
/* update the TLB according to the current state of the dirty bits */
1840
void cpu_tlb_update_dirty(CPUState *env)
1841
{
1842
    int i;
1843
    for(i = 0; i < CPU_TLB_SIZE; i++)
1844
        tlb_update_dirty(&env->tlb_table[0][i]);
1845
    for(i = 0; i < CPU_TLB_SIZE; i++)
1846
        tlb_update_dirty(&env->tlb_table[1][i]);
1847
#if (NB_MMU_MODES >= 3)
1848
    for(i = 0; i < CPU_TLB_SIZE; i++)
1849
        tlb_update_dirty(&env->tlb_table[2][i]);
1850
#if (NB_MMU_MODES == 4)
1851
    for(i = 0; i < CPU_TLB_SIZE; i++)
1852
        tlb_update_dirty(&env->tlb_table[3][i]);
1853
#endif
1854
#endif
1855
}
1856

    
1857
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1858
{
1859
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1860
        tlb_entry->addr_write = vaddr;
1861
}
1862

    
1863
/* update the TLB corresponding to virtual page vaddr
1864
   so that it is no longer dirty */
1865
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1866
{
1867
    int i;
1868

    
1869
    vaddr &= TARGET_PAGE_MASK;
1870
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1871
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1872
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1873
#if (NB_MMU_MODES >= 3)
1874
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1875
#if (NB_MMU_MODES == 4)
1876
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1877
#endif
1878
#endif
1879
}
1880

    
1881
/* add a new TLB entry. At most one entry for a given virtual address
1882
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1883
   (can only happen in non SOFTMMU mode for I/O pages or pages
1884
   conflicting with the host address space). */
1885
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1886
                      target_phys_addr_t paddr, int prot,
1887
                      int mmu_idx, int is_softmmu)
1888
{
1889
    PhysPageDesc *p;
1890
    unsigned long pd;
1891
    unsigned int index;
1892
    target_ulong address;
1893
    target_ulong code_address;
1894
    target_phys_addr_t addend;
1895
    int ret;
1896
    CPUTLBEntry *te;
1897
    int i;
1898
    target_phys_addr_t iotlb;
1899

    
1900
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1901
    if (!p) {
1902
        pd = IO_MEM_UNASSIGNED;
1903
    } else {
1904
        pd = p->phys_offset;
1905
    }
1906
#if defined(DEBUG_TLB)
1907
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1908
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1909
#endif
1910

    
1911
    ret = 0;
1912
    address = vaddr;
1913
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1914
        /* IO memory case (romd handled later) */
1915
        address |= TLB_MMIO;
1916
    }
1917
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1918
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1919
        /* Normal RAM.  */
1920
        iotlb = pd & TARGET_PAGE_MASK;
1921
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1922
            iotlb |= IO_MEM_NOTDIRTY;
1923
        else
1924
            iotlb |= IO_MEM_ROM;
1925
    } else {
1926
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
1932
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1933
    }
1934

    
1935
    code_address = address;
1936
    /* Make accesses to pages with watchpoints go via the
1937
       watchpoint trap routines.  */
1938
    for (i = 0; i < env->nb_watchpoints; i++) {
1939
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1940
            iotlb = io_mem_watch + paddr;
1941
            /* TODO: The memory case can be optimized by not trapping
1942
               reads of pages with a write breakpoint.  */
1943
            address |= TLB_MMIO;
1944
        }
1945
    }
1946

    
1947
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1948
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
1949
    te = &env->tlb_table[mmu_idx][index];
1950
    te->addend = addend - vaddr;
1951
    if (prot & PAGE_READ) {
1952
        te->addr_read = address;
1953
    } else {
1954
        te->addr_read = -1;
1955
    }
1956

    
1957
    if (prot & PAGE_EXEC) {
1958
        te->addr_code = code_address;
1959
    } else {
1960
        te->addr_code = -1;
1961
    }
1962
    if (prot & PAGE_WRITE) {
1963
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1964
            (pd & IO_MEM_ROMD)) {
1965
            /* Write access calls the I/O callback.  */
1966
            te->addr_write = address | TLB_MMIO;
1967
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1968
                   !cpu_physical_memory_is_dirty(pd)) {
1969
            te->addr_write = address | TLB_NOTDIRTY;
1970
        } else {
1971
            te->addr_write = address;
1972
        }
1973
    } else {
1974
        te->addr_write = -1;
1975
    }
1976
    return ret;
1977
}
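
/* Addressing note for tlb_set_page_exec(): te->addend is stored as
   (host pointer - vaddr), so the softmmu fast path recovers the host address
   of a guest access with a single addition; env->iotlb[mmu_idx] is biased by
   -vaddr in the same way, so adding the access address back yields the value
   the slow path dispatches on. */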
1978

    
1979
#else
1980

    
1981
void tlb_flush(CPUState *env, int flush_global)
1982
{
1983
}
1984

    
1985
void tlb_flush_page(CPUState *env, target_ulong addr)
1986
{
1987
}
1988

    
1989
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1990
                      target_phys_addr_t paddr, int prot,
1991
                      int mmu_idx, int is_softmmu)
1992
{
1993
    return 0;
1994
}
1995

    
1996
/* dump memory mappings */
1997
void page_dump(FILE *f)
1998
{
1999
    unsigned long start, end;
2000
    int i, j, prot, prot1;
2001
    PageDesc *p;
2002

    
2003
    fprintf(f, "%-8s %-8s %-8s %s\n",
2004
            "start", "end", "size", "prot");
2005
    start = -1;
2006
    end = -1;
2007
    prot = 0;
2008
    for(i = 0; i <= L1_SIZE; i++) {
2009
        if (i < L1_SIZE)
2010
            p = l1_map[i];
2011
        else
2012
            p = NULL;
2013
        for(j = 0;j < L2_SIZE; j++) {
2014
            if (!p)
2015
                prot1 = 0;
2016
            else
2017
                prot1 = p[j].flags;
2018
            if (prot1 != prot) {
2019
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2020
                if (start != -1) {
2021
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2022
                            start, end, end - start,
2023
                            prot & PAGE_READ ? 'r' : '-',
2024
                            prot & PAGE_WRITE ? 'w' : '-',
2025
                            prot & PAGE_EXEC ? 'x' : '-');
2026
                }
2027
                if (prot1 != 0)
2028
                    start = end;
2029
                else
2030
                    start = -1;
2031
                prot = prot1;
2032
            }
2033
            if (!p)
2034
                break;
2035
        }
2036
    }
2037
}
2038

    
2039
int page_get_flags(target_ulong address)
2040
{
2041
    PageDesc *p;
2042

    
2043
    p = page_find(address >> TARGET_PAGE_BITS);
2044
    if (!p)
2045
        return 0;
2046
    return p->flags;
2047
}
2048

    
2049
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
2052
void page_set_flags(target_ulong start, target_ulong end, int flags)
2053
{
2054
    PageDesc *p;
2055
    target_ulong addr;
2056

    
2057
    /* mmap_lock should already be held.  */
2058
    start = start & TARGET_PAGE_MASK;
2059
    end = TARGET_PAGE_ALIGN(end);
2060
    if (flags & PAGE_WRITE)
2061
        flags |= PAGE_WRITE_ORG;
2062
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2063
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2064
        /* We may be called for host regions that are outside guest
2065
           address space.  */
2066
        if (!p)
2067
            return;
2068
        /* if the write protection is set, then we invalidate the code
2069
           inside */
2070
        if (!(p->flags & PAGE_WRITE) &&
2071
            (flags & PAGE_WRITE) &&
2072
            p->first_tb) {
2073
            tb_invalidate_phys_page(addr, 0, NULL);
2074
        }
2075
        p->flags = flags;
2076
    }
2077
}
2078

    
2079
int page_check_range(target_ulong start, target_ulong len, int flags)
2080
{
2081
    PageDesc *p;
2082
    target_ulong end;
2083
    target_ulong addr;
2084

    
2085
    if (start + len < start)
2086
        /* we've wrapped around */
2087
        return -1;
2088

    
2089
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2090
    start = start & TARGET_PAGE_MASK;
2091

    
2092
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2093
        p = page_find(addr >> TARGET_PAGE_BITS);
2094
        if( !p )
2095
            return -1;
2096
        if( !(p->flags & PAGE_VALID) )
2097
            return -1;
2098

    
2099
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2100
            return -1;
2101
        if (flags & PAGE_WRITE) {
2102
            if (!(p->flags & PAGE_WRITE_ORG))
2103
                return -1;
2104
            /* unprotect the page if it was put read-only because it
2105
               contains translated code */
2106
            if (!(p->flags & PAGE_WRITE)) {
2107
                if (!page_unprotect(addr, 0, NULL))
2108
                    return -1;
2109
            }
2110
            return 0;
2111
        }
2112
    }
2113
    return 0;
2114
}
2115

    
2116
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2118
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2119
{
2120
    unsigned int page_index, prot, pindex;
2121
    PageDesc *p, *p1;
2122
    target_ulong host_start, host_end, addr;
2123

    
2124
    /* Technically this isn't safe inside a signal handler.  However we
2125
       know this only ever happens in a synchronous SEGV handler, so in
2126
       practice it seems to be ok.  */
2127
    mmap_lock();
2128

    
2129
    host_start = address & qemu_host_page_mask;
2130
    page_index = host_start >> TARGET_PAGE_BITS;
2131
    p1 = page_find(page_index);
2132
    if (!p1) {
2133
        mmap_unlock();
2134
        return 0;
2135
    }
2136
    host_end = host_start + qemu_host_page_size;
2137
    p = p1;
2138
    prot = 0;
2139
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2140
        prot |= p->flags;
2141
        p++;
2142
    }
2143
    /* if the page was really writable, then we change its
2144
       protection back to writable */
2145
    if (prot & PAGE_WRITE_ORG) {
2146
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2147
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2148
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2149
                     (prot & PAGE_BITS) | PAGE_WRITE);
2150
            p1[pindex].flags |= PAGE_WRITE;
2151
            /* and since the content will be modified, we must invalidate
2152
               the corresponding translated code. */
2153
            tb_invalidate_phys_page(address, pc, puc);
2154
#ifdef DEBUG_TB_CHECK
2155
            tb_invalidate_check(address);
2156
#endif
2157
            mmap_unlock();
2158
            return 1;
2159
        }
2160
    }
2161
    mmap_unlock();
2162
    return 0;
2163
}
2164

    
2165
static inline void tlb_set_dirty(CPUState *env,
2166
                                 unsigned long addr, target_ulong vaddr)
2167
{
2168
}
2169
#endif /* defined(CONFIG_USER_ONLY) */
2170

    
2171
#if !defined(CONFIG_USER_ONLY)
2172
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2173
                             ram_addr_t memory);
2174
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2175
                           ram_addr_t orig_memory);
2176
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2177
                      need_subpage)                                     \
2178
    do {                                                                \
2179
        if (addr > start_addr)                                          \
2180
            start_addr2 = 0;                                            \
2181
        else {                                                          \
2182
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2183
            if (start_addr2 > 0)                                        \
2184
                need_subpage = 1;                                       \
2185
        }                                                               \
2186
                                                                        \
2187
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2188
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2189
        else {                                                          \
2190
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2191
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2192
                need_subpage = 1;                                       \
2193
        }                                                               \
2194
    } while (0)
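
/* CHECK_SUBPAGE computes, for the target page containing 'addr', the byte
   range [start_addr2, end_addr2] that the registration actually covers
   within that page, and sets need_subpage when that range does not span the
   whole page (i.e. a finer-grained subpage_t mapping is needed). */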
2195

    
2196
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   I/O memory page */
2199
void cpu_register_physical_memory(target_phys_addr_t start_addr,
2200
                                  ram_addr_t size,
2201
                                  ram_addr_t phys_offset)
2202
{
2203
    target_phys_addr_t addr, end_addr;
2204
    PhysPageDesc *p;
2205
    CPUState *env;
2206
    ram_addr_t orig_size = size;
2207
    void *subpage;
2208

    
2209
#ifdef USE_KQEMU
2210
    /* XXX: should not depend on cpu context */
2211
    env = first_cpu;
2212
    if (env->kqemu_enabled) {
2213
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2214
    }
2215
#endif
2216
    if (kvm_enabled())
2217
        kvm_set_phys_mem(start_addr, size, phys_offset);
2218

    
2219
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2220
    end_addr = start_addr + (target_phys_addr_t)size;
2221
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2222
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2223
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2224
            ram_addr_t orig_memory = p->phys_offset;
2225
            target_phys_addr_t start_addr2, end_addr2;
2226
            int need_subpage = 0;
2227

    
2228
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2229
                          need_subpage);
2230
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2231
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2232
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2233
                                           &p->phys_offset, orig_memory);
2234
                } else {
2235
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2236
                                            >> IO_MEM_SHIFT];
2237
                }
2238
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2239
            } else {
2240
                p->phys_offset = phys_offset;
2241
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2242
                    (phys_offset & IO_MEM_ROMD))
2243
                    phys_offset += TARGET_PAGE_SIZE;
2244
            }
2245
        } else {
2246
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2247
            p->phys_offset = phys_offset;
2248
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2249
                (phys_offset & IO_MEM_ROMD))
2250
                phys_offset += TARGET_PAGE_SIZE;
2251
            else {
2252
                target_phys_addr_t start_addr2, end_addr2;
2253
                int need_subpage = 0;
2254

    
2255
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2256
                              end_addr2, need_subpage);
2257

    
2258
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2259
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2260
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2261
                    subpage_register(subpage, start_addr2, end_addr2,
2262
                                     phys_offset);
2263
                }
2264
            }
2265
        }
2266
    }
2267

    
2268
    /* since each CPU stores ram addresses in its TLB cache, we must
2269
       reset the modified entries */
2270
    /* XXX: slow ! */
2271
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2272
        tlb_flush(env, 1);
2273
    }
2274
}
2275

    
2276
/* XXX: temporary until new memory mapping API */
2277
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2278
{
2279
    PhysPageDesc *p;
2280

    
2281
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2282
    if (!p)
2283
        return IO_MEM_UNASSIGNED;
2284
    return p->phys_offset;
2285
}
2286

    
2287
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
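
/* Allocation note: qemu_ram_alloc() is a simple bump allocator over the
   preallocated phys_ram_base area; phys_ram_alloc_offset only grows
   (page-aligned), and qemu_ram_free() below is a no-op, so RAM blocks are
   never returned to the pool. */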
2300

    
2301
void qemu_ram_free(ram_addr_t addr)
2302
{
2303
}
2304

    
2305
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2306
{
2307
#ifdef DEBUG_UNASSIGNED
2308
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2309
#endif
2310
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2311
    do_unassigned_access(addr, 0, 0, 0, 1);
2312
#endif
2313
    return 0;
2314
}
2315

    
2316
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2317
{
2318
#ifdef DEBUG_UNASSIGNED
2319
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2320
#endif
2321
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2322
    do_unassigned_access(addr, 0, 0, 0, 2);
2323
#endif
2324
    return 0;
2325
}
2326

    
2327
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2328
{
2329
#ifdef DEBUG_UNASSIGNED
2330
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2331
#endif
2332
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2333
    do_unassigned_access(addr, 0, 0, 0, 4);
2334
#endif
2335
    return 0;
2336
}
2337

    
2338
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2339
{
2340
#ifdef DEBUG_UNASSIGNED
2341
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2342
#endif
2343
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2344
    do_unassigned_access(addr, 1, 0, 0, 1);
2345
#endif
2346
}
2347

    
2348
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2349
{
2350
#ifdef DEBUG_UNASSIGNED
2351
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2352
#endif
2353
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2354
    do_unassigned_access(addr, 1, 0, 0, 2);
2355
#endif
2356
}
2357

    
2358
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2359
{
2360
#ifdef DEBUG_UNASSIGNED
2361
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2362
#endif
2363
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2364
    do_unassigned_access(addr, 1, 0, 0, 4);
2365
#endif
2366
}
2367

    
2368
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2369
    unassigned_mem_readb,
2370
    unassigned_mem_readw,
2371
    unassigned_mem_readl,
2372
};
2373

    
2374
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2375
    unassigned_mem_writeb,
2376
    unassigned_mem_writew,
2377
    unassigned_mem_writel,
2378
};
2379

    
2380
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2381
                                uint32_t val)
2382
{
2383
    int dirty_flags;
2384
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2385
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2386
#if !defined(CONFIG_USER_ONLY)
2387
        tb_invalidate_phys_page_fast(ram_addr, 1);
2388
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2389
#endif
2390
    }
2391
    stb_p(phys_ram_base + ram_addr, val);
2392
#ifdef USE_KQEMU
2393
    if (cpu_single_env->kqemu_enabled &&
2394
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2395
        kqemu_modify_page(cpu_single_env, ram_addr);
2396
#endif
2397
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2398
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2399
    /* we remove the notdirty callback only if the code has been
2400
       flushed */
2401
    if (dirty_flags == 0xff)
2402
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2403
}
2404

    
2405
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2406
                                uint32_t val)
2407
{
2408
    int dirty_flags;
2409
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2410
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2411
#if !defined(CONFIG_USER_ONLY)
2412
        tb_invalidate_phys_page_fast(ram_addr, 2);
2413
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2414
#endif
2415
    }
2416
    stw_p(phys_ram_base + ram_addr, val);
2417
#ifdef USE_KQEMU
2418
    if (cpu_single_env->kqemu_enabled &&
2419
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2420
        kqemu_modify_page(cpu_single_env, ram_addr);
2421
#endif
2422
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2423
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2424
    /* we remove the notdirty callback only if the code has been
2425
       flushed */
2426
    if (dirty_flags == 0xff)
2427
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2428
}
2429

    
2430
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2431
                                uint32_t val)
2432
{
2433
    int dirty_flags;
2434
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2435
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2436
#if !defined(CONFIG_USER_ONLY)
2437
        tb_invalidate_phys_page_fast(ram_addr, 4);
2438
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2439
#endif
2440
    }
2441
    stl_p(phys_ram_base + ram_addr, val);
2442
#ifdef USE_KQEMU
2443
    if (cpu_single_env->kqemu_enabled &&
2444
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2445
        kqemu_modify_page(cpu_single_env, ram_addr);
2446
#endif
2447
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2448
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2449
    /* we remove the notdirty callback only if the code has been
2450
       flushed */
2451
    if (dirty_flags == 0xff)
2452
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2453
}
2454

    
2455
static CPUReadMemoryFunc *error_mem_read[3] = {
2456
    NULL, /* never used */
2457
    NULL, /* never used */
2458
    NULL, /* never used */
2459
};
2460

    
2461
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2462
    notdirty_mem_writeb,
2463
    notdirty_mem_writew,
2464
    notdirty_mem_writel,
2465
};
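
/* The notdirty handlers above implement self-modifying code detection for
   RAM: pages whose CODE_DIRTY_FLAG is clear are mapped through these
   callbacks, and each write first invalidates any translated blocks on the
   page, then performs the store and sets the dirty bits, so later writes can
   go back to the fast path. */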
2466

    
2467
/* Generate a debug exception if a watchpoint has been hit.  */
2468
static void check_watchpoint(int offset, int flags)
2469
{
2470
    CPUState *env = cpu_single_env;
2471
    target_ulong vaddr;
2472
    int i;
2473

    
2474
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2475
    for (i = 0; i < env->nb_watchpoints; i++) {
2476
        if (vaddr == env->watchpoint[i].vaddr
2477
                && (env->watchpoint[i].type & flags)) {
2478
            env->watchpoint_hit = i + 1;
2479
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2480
            break;
2481
        }
2482
    }
2483
}
2484

    
2485
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2486
   so these check for a hit then pass through to the normal out-of-line
2487
   phys routines.  */
2488
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2489
{
2490
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2491
    return ldub_phys(addr);
2492
}
2493

    
2494
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2495
{
2496
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2497
    return lduw_phys(addr);
2498
}
2499

    
2500
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2501
{
2502
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2503
    return ldl_phys(addr);
2504
}
2505

    
2506
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2507
                             uint32_t val)
2508
{
2509
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2510
    stb_phys(addr, val);
2511
}
2512

    
2513
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2514
                             uint32_t val)
2515
{
2516
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2517
    stw_phys(addr, val);
2518
}
2519

    
2520
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2521
                             uint32_t val)
2522
{
2523
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2524
    stl_phys(addr, val);
2525
}
2526

    
2527
static CPUReadMemoryFunc *watch_mem_read[3] = {
2528
    watch_mem_readb,
2529
    watch_mem_readw,
2530
    watch_mem_readl,
2531
};
2532

    
2533
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2534
    watch_mem_writeb,
2535
    watch_mem_writew,
2536
    watch_mem_writel,
2537
};
2538

    
2539
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2540
                                 unsigned int len)
2541
{
2542
    uint32_t ret;
2543
    unsigned int idx;
2544

    
2545
    idx = SUBPAGE_IDX(addr - mmio->base);
2546
#if defined(DEBUG_SUBPAGE)
2547
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2548
           mmio, len, addr, idx);
2549
#endif
2550
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2551

    
2552
    return ret;
2553
}
2554

    
2555
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2556
                              uint32_t value, unsigned int len)
2557
{
2558
    unsigned int idx;
2559

    
2560
    idx = SUBPAGE_IDX(addr - mmio->base);
2561
#if defined(DEBUG_SUBPAGE)
2562
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2563
           mmio, len, addr, idx, value);
2564
#endif
2565
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2566
}
2567

    
2568
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2569
{
2570
#if defined(DEBUG_SUBPAGE)
2571
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2572
#endif
2573

    
2574
    return subpage_readlen(opaque, addr, 0);
2575
}
2576

    
2577
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2578
                            uint32_t value)
2579
{
2580
#if defined(DEBUG_SUBPAGE)
2581
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2582
#endif
2583
    subpage_writelen(opaque, addr, value, 0);
2584
}
2585

    
2586
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2587
{
2588
#if defined(DEBUG_SUBPAGE)
2589
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2590
#endif
2591

    
2592
    return subpage_readlen(opaque, addr, 1);
2593
}
2594

    
2595
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2596
                            uint32_t value)
2597
{
2598
#if defined(DEBUG_SUBPAGE)
2599
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2600
#endif
2601
    subpage_writelen(opaque, addr, value, 1);
2602
}
2603

    
2604
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2605
{
2606
#if defined(DEBUG_SUBPAGE)
2607
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2608
#endif
2609

    
2610
    return subpage_readlen(opaque, addr, 2);
2611
}
2612

    
2613
static void subpage_writel (void *opaque,
2614
                         target_phys_addr_t addr, uint32_t value)
2615
{
2616
#if defined(DEBUG_SUBPAGE)
2617
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2618
#endif
2619
    subpage_writelen(opaque, addr, value, 2);
2620
}
2621

    
2622
static CPUReadMemoryFunc *subpage_read[] = {
2623
    &subpage_readb,
2624
    &subpage_readw,
2625
    &subpage_readl,
2626
};
2627

    
2628
static CPUWriteMemoryFunc *subpage_write[] = {
2629
    &subpage_writeb,
2630
    &subpage_writew,
2631
    &subpage_writel,
2632
};
2633

    
2634
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2635
                             ram_addr_t memory)
2636
{
2637
    int idx, eidx;
2638
    unsigned int i;
2639

    
2640
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2641
        return -1;
2642
    idx = SUBPAGE_IDX(start);
2643
    eidx = SUBPAGE_IDX(end);
2644
#if defined(DEBUG_SUBPAGE)
2645
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2646
           mmio, start, end, idx, eidx, memory);
2647
#endif
2648
    memory >>= IO_MEM_SHIFT;
2649
    for (; idx <= eidx; idx++) {
2650
        for (i = 0; i < 4; i++) {
2651
            if (io_mem_read[memory][i]) {
2652
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2653
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2654
            }
2655
            if (io_mem_write[memory][i]) {
2656
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2657
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2658
            }
2659
        }
2660
    }
2661

    
2662
    return 0;
2663
}
2664

    
2665
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2666
                           ram_addr_t orig_memory)
2667
{
2668
    subpage_t *mmio;
2669
    int subpage_memory;
2670

    
2671
    mmio = qemu_mallocz(sizeof(subpage_t));
2672
    if (mmio != NULL) {
2673
        mmio->base = base;
2674
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2675
#if defined(DEBUG_SUBPAGE)
2676
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2677
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2678
#endif
2679
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2680
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2681
    }
2682

    
2683
    return mmio;
2684
}
2685

    
2686
static void io_mem_init(void)
2687
{
2688
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2689
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2690
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2691
    io_mem_nb = 5;
2692

    
2693
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2694
                                          watch_mem_write, NULL);
2695
    /* alloc dirty bits array */
2696
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2697
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2698
}
2699

    
2700
/* mem_read and mem_write are arrays of functions containing the
   functions used to access bytes (index 0), words (index 1) and dwords
   (index 2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding I/O zone is
   modified. If it is zero, a new I/O zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
2708
int cpu_register_io_memory(int io_index,
2709
                           CPUReadMemoryFunc **mem_read,
2710
                           CPUWriteMemoryFunc **mem_write,
2711
                           void *opaque)
2712
{
2713
    int i, subwidth = 0;
2714

    
2715
    if (io_index <= 0) {
2716
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2717
            return -1;
2718
        io_index = io_mem_nb++;
2719
    } else {
2720
        if (io_index >= IO_MEM_NB_ENTRIES)
2721
            return -1;
2722
    }
2723

    
2724
    for(i = 0;i < 3; i++) {
2725
        if (!mem_read[i] || !mem_write[i])
2726
            subwidth = IO_MEM_SUBWIDTH;
2727
        io_mem_read[io_index][i] = mem_read[i];
2728
        io_mem_write[io_index][i] = mem_write[i];
2729
    }
2730
    io_mem_opaque[io_index] = opaque;
2731
    return (io_index << IO_MEM_SHIFT) | subwidth;
2732
}
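
/* Illustrative usage sketch (hypothetical device model, not code from this
 * file):
 *
 *     static CPUReadMemoryFunc *my_read[3] = {
 *         my_readb, my_readw, my_readl,
 *     };
 *     static CPUWriteMemoryFunc *my_write[3] = {
 *         my_writeb, my_writew, my_writel,
 *     };
 *     int io = cpu_register_io_memory(0, my_read, my_write, opaque_state);
 *     cpu_register_physical_memory(base, size, io);
 *
 * my_read/my_write/opaque_state/base/size are placeholder names; the return
 * value is what cpu_register_physical_memory() expects as phys_offset for an
 * I/O page. */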
2733

    
2734
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2735
{
2736
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2737
}
2738

    
2739
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2740
{
2741
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2742
}
2743

    
2744
#endif /* !defined(CONFIG_USER_ONLY) */
2745

    
2746
/* physical memory access (slow version, mainly for debug) */
2747
#if defined(CONFIG_USER_ONLY)
2748
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2749
                            int len, int is_write)
2750
{
2751
    int l, flags;
2752
    target_ulong page;
2753
    void * p;
2754

    
2755
    while (len > 0) {
2756
        page = addr & TARGET_PAGE_MASK;
2757
        l = (page + TARGET_PAGE_SIZE) - addr;
2758
        if (l > len)
2759
            l = len;
2760
        flags = page_get_flags(page);
2761
        if (!(flags & PAGE_VALID))
2762
            return;
2763
        if (is_write) {
2764
            if (!(flags & PAGE_WRITE))
2765
                return;
2766
            /* XXX: this code should not depend on lock_user */
2767
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2768
                /* FIXME - should this return an error rather than just fail? */
2769
                return;
2770
            memcpy(p, buf, l);
2771
            unlock_user(p, addr, l);
2772
        } else {
2773
            if (!(flags & PAGE_READ))
2774
                return;
2775
            /* XXX: this code should not depend on lock_user */
2776
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2777
                /* FIXME - should this return an error rather than just fail? */
2778
                return;
2779
            memcpy(buf, p, l);
2780
            unlock_user(p, addr, 0);
2781
        }
2782
        len -= l;
2783
        buf += l;
2784
        addr += l;
2785
    }
2786
}
2787

    
2788
#else
2789
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2790
                            int len, int is_write)
2791
{
2792
    int l, io_index;
2793
    uint8_t *ptr;
2794
    uint32_t val;
2795
    target_phys_addr_t page;
2796
    unsigned long pd;
2797
    PhysPageDesc *p;
2798

    
2799
    while (len > 0) {
2800
        page = addr & TARGET_PAGE_MASK;
2801
        l = (page + TARGET_PAGE_SIZE) - addr;
2802
        if (l > len)
2803
            l = len;
2804
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2805
        if (!p) {
2806
            pd = IO_MEM_UNASSIGNED;
2807
        } else {
2808
            pd = p->phys_offset;
2809
        }
2810

    
2811
        if (is_write) {
2812
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2813
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2814
                /* XXX: could force cpu_single_env to NULL to avoid
2815
                   potential bugs */
2816
                if (l >= 4 && ((addr & 3) == 0)) {
2817
                    /* 32 bit write access */
2818
                    val = ldl_p(buf);
2819
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2820
                    l = 4;
2821
                } else if (l >= 2 && ((addr & 1) == 0)) {
2822
                    /* 16 bit write access */
2823
                    val = lduw_p(buf);
2824
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2825
                    l = 2;
2826
                } else {
2827
                    /* 8 bit write access */
2828
                    val = ldub_p(buf);
2829
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2830
                    l = 1;
2831
                }
2832
            } else {
2833
                unsigned long addr1;
2834
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2835
                /* RAM case */
2836
                ptr = phys_ram_base + addr1;
2837
                memcpy(ptr, buf, l);
2838
                if (!cpu_physical_memory_is_dirty(addr1)) {
2839
                    /* invalidate code */
2840
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2841
                    /* set dirty bit */
2842
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2843
                        (0xff & ~CODE_DIRTY_FLAG);
2844
                }
2845
            }
2846
        } else {
2847
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2848
                !(pd & IO_MEM_ROMD)) {
2849
                /* I/O case */
2850
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2851
                if (l >= 4 && ((addr & 3) == 0)) {
2852
                    /* 32 bit read access */
2853
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2854
                    stl_p(buf, val);
2855
                    l = 4;
2856
                } else if (l >= 2 && ((addr & 1) == 0)) {
2857
                    /* 16 bit read access */
2858
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2859
                    stw_p(buf, val);
2860
                    l = 2;
2861
                } else {
2862
                    /* 8 bit read access */
2863
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2864
                    stb_p(buf, val);
2865
                    l = 1;
2866
                }
2867
            } else {
2868
                /* RAM case */
2869
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2870
                    (addr & ~TARGET_PAGE_MASK);
2871
                memcpy(buf, ptr, l);
2872
            }
2873
        }
2874
        len -= l;
2875
        buf += l;
2876
        addr += l;
2877
    }
2878
}
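
/* Access-size note: for I/O pages the loop above issues the widest naturally
   aligned access that still fits (4, then 2, then 1 bytes); RAM pages are
   simply memcpy'd, and on RAM writes any translated code on the page is
   invalidated and the dirty bits (except CODE_DIRTY_FLAG) are set. */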
2879

    
2880
/* used for ROM loading : can write in RAM and ROM */
2881
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2882
                                   const uint8_t *buf, int len)
2883
{
2884
    int l;
2885
    uint8_t *ptr;
2886
    target_phys_addr_t page;
2887
    unsigned long pd;
2888
    PhysPageDesc *p;
2889

    
2890
    while (len > 0) {
2891
        page = addr & TARGET_PAGE_MASK;
2892
        l = (page + TARGET_PAGE_SIZE) - addr;
2893
        if (l > len)
2894
            l = len;
2895
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2896
        if (!p) {
2897
            pd = IO_MEM_UNASSIGNED;
2898
        } else {
2899
            pd = p->phys_offset;
2900
        }
2901

    
2902
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2903
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2904
            !(pd & IO_MEM_ROMD)) {
2905
            /* do nothing */
2906
        } else {
2907
            unsigned long addr1;
2908
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2909
            /* ROM/RAM case */
2910
            ptr = phys_ram_base + addr1;
2911
            memcpy(ptr, buf, l);
2912
        }
2913
        len -= l;
2914
        buf += l;
2915
        addr += l;
2916
    }
2917
}
2918

    
2919

    
2920
/* warning: addr must be aligned */
2921
uint32_t ldl_phys(target_phys_addr_t addr)
2922
{
2923
    int io_index;
2924
    uint8_t *ptr;
2925
    uint32_t val;
2926
    unsigned long pd;
2927
    PhysPageDesc *p;
2928

    
2929
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2930
    if (!p) {
2931
        pd = IO_MEM_UNASSIGNED;
2932
    } else {
2933
        pd = p->phys_offset;
2934
    }
2935

    
2936
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2937
        !(pd & IO_MEM_ROMD)) {
2938
        /* I/O case */
2939
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2940
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2941
    } else {
2942
        /* RAM case */
2943
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2944
            (addr & ~TARGET_PAGE_MASK);
2945
        val = ldl_p(ptr);
2946
    }
2947
    return val;
2948
}
2949

    
2950
/* warning: addr must be aligned */
2951
uint64_t ldq_phys(target_phys_addr_t addr)
2952
{
2953
    int io_index;
2954
    uint8_t *ptr;
2955
    uint64_t val;
2956
    unsigned long pd;
2957
    PhysPageDesc *p;
2958

    
2959
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2960
    if (!p) {
2961
        pd = IO_MEM_UNASSIGNED;
2962
    } else {
2963
        pd = p->phys_offset;
2964
    }
2965

    
2966
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2967
        !(pd & IO_MEM_ROMD)) {
2968
        /* I/O case */
2969
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2970
#ifdef TARGET_WORDS_BIGENDIAN
2971
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2972
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2973
#else
2974
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2975
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2976
#endif
2977
    } else {
2978
        /* RAM case */
2979
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2980
            (addr & ~TARGET_PAGE_MASK);
2981
        val = ldq_p(ptr);
2982
    }
2983
    return val;
2984
}
2985

    
2986
/* XXX: optimize */
2987
uint32_t ldub_phys(target_phys_addr_t addr)
2988
{
2989
    uint8_t val;
2990
    cpu_physical_memory_read(addr, &val, 1);
2991
    return val;
2992
}
2993

    
2994
/* XXX: optimize */
2995
uint32_t lduw_phys(target_phys_addr_t addr)
2996
{
2997
    uint16_t val;
2998
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2999
    return tswap16(val);
3000
}
3001

    
3002
/* warning: addr must be aligned. The ram page is not masked as dirty
3003
   and the code inside is not invalidated. It is useful if the dirty
3004
   bits are used to track modified PTEs */
3005
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3006
{
3007
    int io_index;
3008
    uint8_t *ptr;
3009
    unsigned long pd;
3010
    PhysPageDesc *p;
3011

    
3012
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3013
    if (!p) {
3014
        pd = IO_MEM_UNASSIGNED;
3015
    } else {
3016
        pd = p->phys_offset;
3017
    }
3018

    
3019
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3020
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3021
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3022
    } else {
3023
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3024
        ptr = phys_ram_base + addr1;
3025
        stl_p(ptr, val);
3026

    
3027
        if (unlikely(in_migration)) {
3028
            if (!cpu_physical_memory_is_dirty(addr1)) {
3029
                /* invalidate code */
3030
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3031
                /* set dirty bit */
3032
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3033
                    (0xff & ~CODE_DIRTY_FLAG);
3034
            }
3035
        }
3036
    }
3037
}
3038

    
3039
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3040
{
3041
    int io_index;
3042
    uint8_t *ptr;
3043
    unsigned long pd;
3044
    PhysPageDesc *p;
3045

    
3046
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3047
    if (!p) {
3048
        pd = IO_MEM_UNASSIGNED;
3049
    } else {
3050
        pd = p->phys_offset;
3051
    }
3052

    
3053
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3054
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3055
#ifdef TARGET_WORDS_BIGENDIAN
3056
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3057
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3058
#else
3059
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3060
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3061
#endif
3062
    } else {
3063
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3064
            (addr & ~TARGET_PAGE_MASK);
3065
        stq_p(ptr, val);
3066
    }
3067
}
3068

    
3069
/* warning: addr must be aligned */
3070
void stl_phys(target_phys_addr_t addr, uint32_t val)
3071
{
3072
    int io_index;
3073
    uint8_t *ptr;
3074
    unsigned long pd;
3075
    PhysPageDesc *p;
3076

    
3077
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3078
    if (!p) {
3079
        pd = IO_MEM_UNASSIGNED;
3080
    } else {
3081
        pd = p->phys_offset;
3082
    }
3083

    
3084
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3085
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3086
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3087
    } else {
3088
        unsigned long addr1;
3089
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3090
        /* RAM case */
3091
        ptr = phys_ram_base + addr1;
3092
        stl_p(ptr, val);
3093
        if (!cpu_physical_memory_is_dirty(addr1)) {
3094
            /* invalidate code */
3095
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3096
            /* set dirty bit */
3097
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3098
                (0xff & ~CODE_DIRTY_FLAG);
3099
        }
3100
    }
3101
}
3102

    
3103
/* XXX: optimize */
3104
void stb_phys(target_phys_addr_t addr, uint32_t val)
3105
{
3106
    uint8_t v = val;
3107
    cpu_physical_memory_write(addr, &v, 1);
3108
}
3109

    
3110
/* XXX: optimize */
3111
void stw_phys(target_phys_addr_t addr, uint32_t val)
3112
{
3113
    uint16_t v = tswap16(val);
3114
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3115
}
3116

    
3117
/* XXX: optimize */
3118
void stq_phys(target_phys_addr_t addr, uint64_t val)
3119
{
3120
    val = tswap64(val);
3121
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3122
}
3123

    
3124
#endif
3125

    
3126
/* virtual memory access for debug */
3127
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3128
                        uint8_t *buf, int len, int is_write)
3129
{
3130
    int l;
3131
    target_phys_addr_t phys_addr;
3132
    target_ulong page;
3133

    
3134
    while (len > 0) {
3135
        page = addr & TARGET_PAGE_MASK;
3136
        phys_addr = cpu_get_phys_page_debug(env, page);
3137
        /* if no physical page mapped, return an error */
3138
        if (phys_addr == -1)
3139
            return -1;
3140
        l = (page + TARGET_PAGE_SIZE) - addr;
3141
        if (l > len)
3142
            l = len;
3143
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3144
                               buf, l, is_write);
3145
        len -= l;
3146
        buf += l;
3147
        addr += l;
3148
    }
3149
    return 0;
3150
}
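
/* Usage note: this helper takes guest *virtual* addresses, translating each
   page with cpu_get_phys_page_debug() before falling back to
   cpu_physical_memory_rw(); it is the sort of entry point a debugger stub
   (e.g. the gdb server) is expected to use, and it returns -1 as soon as an
   unmapped page is encountered. */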
3151

    
3152
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

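/* Print statistics about the translation cache: generated code size,
   TB counts and average sizes, cross-page TBs, direct jump usage, and
   flush/invalidate counters, followed by the TCG statistics from
   tcg_dump_info(). */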
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

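/* Re-instantiate the softmmu access templates with the _cmmu suffix and
   SOFTMMU_CODE_ACCESS defined: this builds the MMU helpers used for code
   fetches.  Each inclusion of softmmu_template.h generates the helper
   for one access size, selected by SHIFT (1 << SHIFT bytes). */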
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif