1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#include "hw/hw.h"
41
#include "osdep.h"
42
#include "kvm.h"
43
#if defined(CONFIG_USER_ONLY)
44
#include <qemu.h>
45
#endif
46

    
47
//#define DEBUG_TB_INVALIDATE
48
//#define DEBUG_FLUSH
49
//#define DEBUG_TLB
50
//#define DEBUG_UNASSIGNED
51

    
52
/* make various TB consistency checks */
53
//#define DEBUG_TB_CHECK
54
//#define DEBUG_TLB_CHECK
55

    
56
//#define DEBUG_IOPORT
57
//#define DEBUG_SUBPAGE
58

    
59
#if !defined(CONFIG_USER_ONLY)
60
/* TB consistency checks only implemented for usermode emulation.  */
61
#undef DEBUG_TB_CHECK
62
#endif
63

    
64
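/* number of write accesses to a code page after which we build a
   per-byte bitmap of the translated code it contains */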
#define SMC_BITMAP_USE_THRESHOLD 10
65

    
66
#define MMAP_AREA_START        0x00000000
67
#define MMAP_AREA_END          0xa8000000
68

    
69
#if defined(TARGET_SPARC64)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 41
71
#elif defined(TARGET_SPARC)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 36
73
#elif defined(TARGET_ALPHA)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#define TARGET_VIRT_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_PPC64)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 42
80
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81
#define TARGET_PHYS_ADDR_SPACE_BITS 36
82
#else
83
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84
#define TARGET_PHYS_ADDR_SPACE_BITS 32
85
#endif
86

    
87
static TranslationBlock *tbs;
88
int code_gen_max_blocks;
89
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90
static int nb_tbs;
91
/* any access to the tbs or the page table must use this lock */
92
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93

    
94
#if defined(__arm__) || defined(__sparc_v9__)
95
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC), so place it in a
 section close to the code segment. */
98
#define code_gen_section                                \
99
    __attribute__((__section__(".gen_code")))           \
100
    __attribute__((aligned (32)))
101
#else
102
#define code_gen_section                                \
103
    __attribute__((aligned (32)))
104
#endif
105

    
106
uint8_t code_gen_prologue[1024] code_gen_section;
107
static uint8_t *code_gen_buffer;
108
static unsigned long code_gen_buffer_size;
109
/* threshold to flush the translated code buffer */
110
static unsigned long code_gen_buffer_max_size;
111
uint8_t *code_gen_ptr;
112

    
113
#if !defined(CONFIG_USER_ONLY)
114
ram_addr_t phys_ram_size;
115
int phys_ram_fd;
116
uint8_t *phys_ram_base;
117
uint8_t *phys_ram_dirty;
118
static int in_migration;
119
static ram_addr_t phys_ram_alloc_offset = 0;
120
#endif
121

    
122
CPUState *first_cpu;
123
/* current CPU in the current thread. It is only valid inside
124
   cpu_exec() */
125
CPUState *cpu_single_env;
126
/* 0 = Do not count executed instructions.
127
   1 = Precise instruction counting.
128
   2 = Adaptive rate instruction counting.  */
129
int use_icount = 0;
130
/* Current instruction counter.  While executing translated code this may
131
   include some instructions that have not yet been executed.  */
132
int64_t qemu_icount;
133

    
134
typedef struct PageDesc {
135
    /* list of TBs intersecting this ram page */
136
    TranslationBlock *first_tb;
137
    /* to optimize handling of self-modifying code, we count the number
       of write accesses to a given page; past a threshold we build a
       bitmap of its translated code */
139
    unsigned int code_write_count;
140
    uint8_t *code_bitmap;
141
#if defined(CONFIG_USER_ONLY)
142
    unsigned long flags;
143
#endif
144
} PageDesc;
145

    
146
typedef struct PhysPageDesc {
147
    /* offset in host memory of the page + io_index in the low bits */
148
    ram_addr_t phys_offset;
149
} PhysPageDesc;
150

    
151
#define L2_BITS 10
152
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153
/* XXX: this is a temporary hack for the alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64-bit address space.
 */
157
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
158
#else
159
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
160
#endif
161

    
162
#define L1_SIZE (1 << L1_BITS)
163
#define L2_SIZE (1 << L2_BITS)
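/* a page index is split into an L1 index (upper L1_BITS) and an L2 index
   (lower L2_BITS); the L1 tables hold pointers to lazily allocated arrays
   of L2_SIZE descriptors */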
164

    
165
unsigned long qemu_real_host_page_size;
166
unsigned long qemu_host_page_bits;
167
unsigned long qemu_host_page_size;
168
unsigned long qemu_host_page_mask;
169

    
170
/* XXX: for system emulation, it could just be an array */
171
static PageDesc *l1_map[L1_SIZE];
172
static PhysPageDesc **l1_phys_map;
173

    
174
#if !defined(CONFIG_USER_ONLY)
175
static void io_mem_init(void);
176

    
177
/* io memory support */
178
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
180
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
181
static int io_mem_nb;
182
static int io_mem_watch;
183
#endif
184

    
185
/* log support */
186
static const char *logfilename = "/tmp/qemu.log";
187
FILE *logfile;
188
int loglevel;
189
static int log_append = 0;
190

    
191
/* statistics */
192
static int tlb_flush_count;
193
static int tb_flush_count;
194
static int tb_phys_invalidate_count;
195

    
196
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197
typedef struct subpage_t {
198
    target_phys_addr_t base;
199
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201
    void *opaque[TARGET_PAGE_SIZE][2][4];
202
} subpage_t;
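/* a subpage splits one target page into finer-grained I/O regions: each
   byte offset (SUBPAGE_IDX) gets its own read/write handlers, one per
   access size */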
203

    
204
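/* make a host memory range executable: VirtualProtect() on Windows,
   mprotect() elsewhere (the Unix version rounds the range out to host
   page boundaries) */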
#ifdef _WIN32
205
static void map_exec(void *addr, long size)
206
{
207
    DWORD old_protect;
208
    VirtualProtect(addr, size,
209
                   PAGE_EXECUTE_READWRITE, &old_protect);
210
    
211
}
212
#else
213
static void map_exec(void *addr, long size)
214
{
215
    unsigned long start, end, page_size;
216
    
217
    page_size = getpagesize();
218
    start = (unsigned long)addr;
219
    start &= ~(page_size - 1);
220
    
221
    end = (unsigned long)addr + size;
222
    end += page_size - 1;
223
    end &= ~(page_size - 1);
224
    
225
    mprotect((void *)start, end - start,
226
             PROT_READ | PROT_WRITE | PROT_EXEC);
227
}
228
#endif
229

    
230
static void page_init(void)
231
{
232
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
234
#ifdef _WIN32
235
    {
236
        SYSTEM_INFO system_info;
237

    
238
        GetSystemInfo(&system_info);
239
        qemu_real_host_page_size = system_info.dwPageSize;
240
    }
241
#else
242
    qemu_real_host_page_size = getpagesize();
243
#endif
244
    if (qemu_host_page_size == 0)
245
        qemu_host_page_size = qemu_real_host_page_size;
246
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
247
        qemu_host_page_size = TARGET_PAGE_SIZE;
248
    qemu_host_page_bits = 0;
249
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250
        qemu_host_page_bits++;
251
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
252
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254

    
255
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256
    {
257
        long long startaddr, endaddr;
258
        FILE *f;
259
        int n;
260

    
261
        mmap_lock();
262
        last_brk = (unsigned long)sbrk(0);
263
        f = fopen("/proc/self/maps", "r");
264
        if (f) {
265
            do {
266
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267
                if (n == 2) {
268
                    startaddr = MIN(startaddr,
269
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270
                    endaddr = MIN(endaddr,
271
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
273
                                   TARGET_PAGE_ALIGN(endaddr),
274
                                   PAGE_RESERVED); 
275
                }
276
            } while (!feof(f));
277
            fclose(f);
278
        }
279
        mmap_unlock();
280
    }
281
#endif
282
}
283

    
284
static inline PageDesc **page_l1_map(target_ulong index)
285
{
286
#if TARGET_LONG_BITS > 32
287
    /* Host memory outside guest VM.  For 32-bit targets we have already
288
       excluded high addresses.  */
289
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290
        return NULL;
291
#endif
292
    return &l1_map[index >> L2_BITS];
293
}
294

    
295
static inline PageDesc *page_find_alloc(target_ulong index)
296
{
297
    PageDesc **lp, *p;
298
    lp = page_l1_map(index);
299
    if (!lp)
300
        return NULL;
301

    
302
    p = *lp;
303
    if (!p) {
304
        /* allocate if not found */
305
#if defined(CONFIG_USER_ONLY)
306
        unsigned long addr;
307
        size_t len = sizeof(PageDesc) * L2_SIZE;
308
        /* Don't use qemu_malloc because it may recurse.  */
309
        p = mmap(0, len, PROT_READ | PROT_WRITE,
310
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
311
        *lp = p;
312
        addr = h2g(p);
313
        if (addr == (target_ulong)addr) {
314
            page_set_flags(addr & TARGET_PAGE_MASK,
315
                           TARGET_PAGE_ALIGN(addr + len),
316
                           PAGE_RESERVED); 
317
        }
318
#else
319
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320
        *lp = p;
321
#endif
322
    }
323
    return p + (index & (L2_SIZE - 1));
324
}
325

    
326
static inline PageDesc *page_find(target_ulong index)
327
{
328
    PageDesc **lp, *p;
329
    lp = page_l1_map(index);
330
    if (!lp)
331
        return NULL;
332

    
333
    p = *lp;
334
    if (!p)
335
        return 0;
336
    return p + (index & (L2_SIZE - 1));
337
}
338

    
339
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
340
{
341
    void **lp, **p;
342
    PhysPageDesc *pd;
343

    
344
    p = (void **)l1_phys_map;
345
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
346

    
347
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
349
#endif
350
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351
    p = *lp;
352
    if (!p) {
353
        /* allocate if not found */
354
        if (!alloc)
355
            return NULL;
356
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357
        memset(p, 0, sizeof(void *) * L1_SIZE);
358
        *lp = p;
359
    }
360
#endif
361
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362
    pd = *lp;
363
    if (!pd) {
364
        int i;
365
        /* allocate if not found */
366
        if (!alloc)
367
            return NULL;
368
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
369
        *lp = pd;
370
        for (i = 0; i < L2_SIZE; i++)
371
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
372
    }
373
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
374
}
375

    
376
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
377
{
378
    return phys_page_find_alloc(index, 0);
379
}
380

    
381
#if !defined(CONFIG_USER_ONLY)
382
static void tlb_protect_code(ram_addr_t ram_addr);
383
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384
                                    target_ulong vaddr);
385
#define mmap_lock() do { } while(0)
386
#define mmap_unlock() do { } while(0)
387
#endif
388

    
389
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390

    
391
#if defined(CONFIG_USER_ONLY)
392
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
394
#define USE_STATIC_CODE_GEN_BUFFER
395
#endif
396

    
397
#ifdef USE_STATIC_CODE_GEN_BUFFER
398
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399
#endif
400

    
401
static void code_gen_alloc(unsigned long tb_size)
402
{
403
#ifdef USE_STATIC_CODE_GEN_BUFFER
404
    code_gen_buffer = static_code_gen_buffer;
405
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406
    map_exec(code_gen_buffer, code_gen_buffer_size);
407
#else
408
    code_gen_buffer_size = tb_size;
409
    if (code_gen_buffer_size == 0) {
410
#if defined(CONFIG_USER_ONLY)
411
        /* in user mode, phys_ram_size is not meaningful */
412
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413
#else
414
        /* XXX: needs adjustments */
415
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
416
#endif
417
    }
418
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420
    /* The code gen buffer location may have constraints depending on
421
       the host cpu and OS */
422
#if defined(__linux__) 
423
    {
424
        int flags;
425
        void *start = NULL;
426

    
427
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
428
#if defined(__x86_64__)
429
        flags |= MAP_32BIT;
430
        /* Cannot map more than that */
431
        if (code_gen_buffer_size > (800 * 1024 * 1024))
432
            code_gen_buffer_size = (800 * 1024 * 1024);
433
#elif defined(__sparc_v9__)
434
        // Map the buffer below 2G, so we can use direct calls and branches
435
        flags |= MAP_FIXED;
436
        start = (void *) 0x60000000UL;
437
        if (code_gen_buffer_size > (512 * 1024 * 1024))
438
            code_gen_buffer_size = (512 * 1024 * 1024);
439
#endif
440
        code_gen_buffer = mmap(start, code_gen_buffer_size,
441
                               PROT_WRITE | PROT_READ | PROT_EXEC,
442
                               flags, -1, 0);
443
        if (code_gen_buffer == MAP_FAILED) {
444
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
445
            exit(1);
446
        }
447
    }
448
#elif defined(__FreeBSD__)
449
    {
450
        int flags;
451
        void *addr = NULL;
452
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
453
#if defined(__x86_64__)
454
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455
         * 0x40000000 is free */
456
        flags |= MAP_FIXED;
457
        addr = (void *)0x40000000;
458
        /* Cannot map more than that */
459
        if (code_gen_buffer_size > (800 * 1024 * 1024))
460
            code_gen_buffer_size = (800 * 1024 * 1024);
461
#endif
462
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
463
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
464
                               flags, -1, 0);
465
        if (code_gen_buffer == MAP_FAILED) {
466
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467
            exit(1);
468
        }
469
    }
470
#else
471
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
472
    if (!code_gen_buffer) {
473
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474
        exit(1);
475
    }
476
    map_exec(code_gen_buffer, code_gen_buffer_size);
477
#endif
478
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
479
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
480
    code_gen_buffer_max_size = code_gen_buffer_size - 
481
        code_gen_max_block_size();
482
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
483
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
484
}
485

    
486
/* Must be called before using the QEMU cpus. 'tb_size' is the size
487
   (in bytes) allocated to the translation buffer. Zero means default
488
   size. */
489
void cpu_exec_init_all(unsigned long tb_size)
490
{
491
    cpu_gen_init();
492
    code_gen_alloc(tb_size);
493
    code_gen_ptr = code_gen_buffer;
494
    page_init();
495
#if !defined(CONFIG_USER_ONLY)
496
    io_mem_init();
497
#endif
498
}
499

    
500
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
501

    
502
#define CPU_COMMON_SAVE_VERSION 1
503

    
504
static void cpu_common_save(QEMUFile *f, void *opaque)
505
{
506
    CPUState *env = opaque;
507

    
508
    qemu_put_be32s(f, &env->halted);
509
    qemu_put_be32s(f, &env->interrupt_request);
510
}
511

    
512
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
513
{
514
    CPUState *env = opaque;
515

    
516
    if (version_id != CPU_COMMON_SAVE_VERSION)
517
        return -EINVAL;
518

    
519
    qemu_get_be32s(f, &env->halted);
520
    qemu_get_be32s(f, &env->interrupt_request);
521
    tlb_flush(env, 1);
522

    
523
    return 0;
524
}
525
#endif
526

    
527
void cpu_exec_init(CPUState *env)
528
{
529
    CPUState **penv;
530
    int cpu_index;
531

    
532
    env->next_cpu = NULL;
533
    penv = &first_cpu;
534
    cpu_index = 0;
535
    while (*penv != NULL) {
536
        penv = (CPUState **)&(*penv)->next_cpu;
537
        cpu_index++;
538
    }
539
    env->cpu_index = cpu_index;
540
    env->nb_watchpoints = 0;
541
    *penv = env;
542
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
543
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
544
                    cpu_common_save, cpu_common_load, env);
545
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
546
                    cpu_save, cpu_load, env);
547
#endif
548
}
549

    
550
static inline void invalidate_page_bitmap(PageDesc *p)
551
{
552
    if (p->code_bitmap) {
553
        qemu_free(p->code_bitmap);
554
        p->code_bitmap = NULL;
555
    }
556
    p->code_write_count = 0;
557
}
558

    
559
/* set to NULL all the 'first_tb' fields in all PageDescs */
560
static void page_flush_tb(void)
561
{
562
    int i, j;
563
    PageDesc *p;
564

    
565
    for(i = 0; i < L1_SIZE; i++) {
566
        p = l1_map[i];
567
        if (p) {
568
            for(j = 0; j < L2_SIZE; j++) {
569
                p->first_tb = NULL;
570
                invalidate_page_bitmap(p);
571
                p++;
572
            }
573
        }
574
    }
575
}
576

    
577
/* flush all the translation blocks */
578
/* XXX: tb_flush is currently not thread safe */
579
void tb_flush(CPUState *env1)
580
{
581
    CPUState *env;
582
#if defined(DEBUG_FLUSH)
583
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
584
           (unsigned long)(code_gen_ptr - code_gen_buffer),
585
           nb_tbs, nb_tbs > 0 ?
586
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
587
#endif
588
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
589
        cpu_abort(env1, "Internal error: code buffer overflow\n");
590

    
591
    nb_tbs = 0;
592

    
593
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
594
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
595
    }
596

    
597
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
598
    page_flush_tb();
599

    
600
    code_gen_ptr = code_gen_buffer;
601
    /* XXX: flush processor icache at this point if cache flush is
602
       expensive */
603
    tb_flush_count++;
604
}
605

    
606
#ifdef DEBUG_TB_CHECK
607

    
608
static void tb_invalidate_check(target_ulong address)
609
{
610
    TranslationBlock *tb;
611
    int i;
612
    address &= TARGET_PAGE_MASK;
613
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
614
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
615
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
616
                  address >= tb->pc + tb->size)) {
617
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
618
                       address, (long)tb->pc, tb->size);
619
            }
620
        }
621
    }
622
}
623

    
624
/* verify that all the pages have correct rights for code */
625
static void tb_page_check(void)
626
{
627
    TranslationBlock *tb;
628
    int i, flags1, flags2;
629

    
630
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
631
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
632
            flags1 = page_get_flags(tb->pc);
633
            flags2 = page_get_flags(tb->pc + tb->size - 1);
634
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
635
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
636
                       (long)tb->pc, tb->size, flags1, flags2);
637
            }
638
        }
639
    }
640
}
641

    
642
static void tb_jmp_check(TranslationBlock *tb)
643
{
644
    TranslationBlock *tb1;
645
    unsigned int n1;
646

    
647
    /* follow the list of jumps to this TB up to its end */
648
    tb1 = tb->jmp_first;
649
    for(;;) {
650
        n1 = (long)tb1 & 3;
651
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
652
        if (n1 == 2)
653
            break;
654
        tb1 = tb1->jmp_next[n1];
655
    }
656
    /* check end of list */
657
    if (tb1 != tb) {
658
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
659
    }
660
}
661

    
662
#endif
663

    
664
/* invalidate one TB */
665
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
666
                             int next_offset)
667
{
668
    TranslationBlock *tb1;
669
    for(;;) {
670
        tb1 = *ptb;
671
        if (tb1 == tb) {
672
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
673
            break;
674
        }
675
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
676
    }
677
}
678

    
679
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
680
{
681
    TranslationBlock *tb1;
682
    unsigned int n1;
683

    
684
    for(;;) {
685
        tb1 = *ptb;
686
        n1 = (long)tb1 & 3;
687
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
688
        if (tb1 == tb) {
689
            *ptb = tb1->page_next[n1];
690
            break;
691
        }
692
        ptb = &tb1->page_next[n1];
693
    }
694
}
695

    
696
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
697
{
698
    TranslationBlock *tb1, **ptb;
699
    unsigned int n1;
700

    
701
    ptb = &tb->jmp_next[n];
702
    tb1 = *ptb;
703
    if (tb1) {
704
        /* find tb(n) in circular list */
705
        for(;;) {
706
            tb1 = *ptb;
707
            n1 = (long)tb1 & 3;
708
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
709
            if (n1 == n && tb1 == tb)
710
                break;
711
            if (n1 == 2) {
712
                ptb = &tb1->jmp_first;
713
            } else {
714
                ptb = &tb1->jmp_next[n1];
715
            }
716
        }
717
        /* now we can remove tb(n) from the list */
718
        *ptb = tb->jmp_next[n];
719

    
720
        tb->jmp_next[n] = NULL;
721
    }
722
}
723

    
724
/* reset the jump entry 'n' of a TB so that it is not chained to
725
   another TB */
726
static inline void tb_reset_jump(TranslationBlock *tb, int n)
727
{
728
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
729
}
730

    
731
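/* remove a TB from every structure that references it: the physical hash
   table, the per-page TB lists, each CPU's jump cache and the jump chains
   of other TBs that branch to it */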
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
732
{
733
    CPUState *env;
734
    PageDesc *p;
735
    unsigned int h, n1;
736
    target_phys_addr_t phys_pc;
737
    TranslationBlock *tb1, *tb2;
738

    
739
    /* remove the TB from the hash list */
740
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
741
    h = tb_phys_hash_func(phys_pc);
742
    tb_remove(&tb_phys_hash[h], tb,
743
              offsetof(TranslationBlock, phys_hash_next));
744

    
745
    /* remove the TB from the page list */
746
    if (tb->page_addr[0] != page_addr) {
747
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
748
        tb_page_remove(&p->first_tb, tb);
749
        invalidate_page_bitmap(p);
750
    }
751
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
752
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
753
        tb_page_remove(&p->first_tb, tb);
754
        invalidate_page_bitmap(p);
755
    }
756

    
757
    tb_invalidated_flag = 1;
758

    
759
    /* remove the TB from the hash list */
760
    h = tb_jmp_cache_hash_func(tb->pc);
761
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
762
        if (env->tb_jmp_cache[h] == tb)
763
            env->tb_jmp_cache[h] = NULL;
764
    }
765

    
766
    /* remove this TB from the two jump lists */
767
    tb_jmp_remove(tb, 0);
768
    tb_jmp_remove(tb, 1);
769

    
770
    /* remove any remaining jumps to this TB */
771
    tb1 = tb->jmp_first;
772
    for(;;) {
773
        n1 = (long)tb1 & 3;
774
        if (n1 == 2)
775
            break;
776
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
777
        tb2 = tb1->jmp_next[n1];
778
        tb_reset_jump(tb1, n1);
779
        tb1->jmp_next[n1] = NULL;
780
        tb1 = tb2;
781
    }
782
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
783

    
784
    tb_phys_invalidate_count++;
785
}
786

    
787
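/* set 'len' bits in the bitmap 'tab', starting at bit index 'start' */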
static inline void set_bits(uint8_t *tab, int start, int len)
788
{
789
    int end, mask, end1;
790

    
791
    end = start + len;
792
    tab += start >> 3;
793
    mask = 0xff << (start & 7);
794
    if ((start & ~7) == (end & ~7)) {
795
        if (start < end) {
796
            mask &= ~(0xff << (end & 7));
797
            *tab |= mask;
798
        }
799
    } else {
800
        *tab++ |= mask;
801
        start = (start + 8) & ~7;
802
        end1 = end & ~7;
803
        while (start < end1) {
804
            *tab++ = 0xff;
805
            start += 8;
806
        }
807
        if (start < end) {
808
            mask = ~(0xff << (end & 7));
809
            *tab |= mask;
810
        }
811
    }
812
}
813

    
814
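/* build a bitmap marking which bytes of the page are covered by translated
   code, so that self-modifying code checks can test individual writes
   instead of invalidating on every access */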
static void build_page_bitmap(PageDesc *p)
815
{
816
    int n, tb_start, tb_end;
817
    TranslationBlock *tb;
818

    
819
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
820
    if (!p->code_bitmap)
821
        return;
822

    
823
    tb = p->first_tb;
824
    while (tb != NULL) {
825
        n = (long)tb & 3;
826
        tb = (TranslationBlock *)((long)tb & ~3);
827
        /* NOTE: this is subtle as a TB may span two physical pages */
828
        if (n == 0) {
829
            /* NOTE: tb_end may be after the end of the page, but
830
               it is not a problem */
831
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
832
            tb_end = tb_start + tb->size;
833
            if (tb_end > TARGET_PAGE_SIZE)
834
                tb_end = TARGET_PAGE_SIZE;
835
        } else {
836
            tb_start = 0;
837
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
838
        }
839
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
840
        tb = tb->page_next[n];
841
    }
842
}
843

    
844
TranslationBlock *tb_gen_code(CPUState *env,
845
                              target_ulong pc, target_ulong cs_base,
846
                              int flags, int cflags)
847
{
848
    TranslationBlock *tb;
849
    uint8_t *tc_ptr;
850
    target_ulong phys_pc, phys_page2, virt_page2;
851
    int code_gen_size;
852

    
853
    phys_pc = get_phys_addr_code(env, pc);
854
    tb = tb_alloc(pc);
855
    if (!tb) {
856
        /* flush must be done */
857
        tb_flush(env);
858
        /* cannot fail at this point */
859
        tb = tb_alloc(pc);
860
        /* Don't forget to invalidate previous TB info.  */
861
        tb_invalidated_flag = 1;
862
    }
863
    tc_ptr = code_gen_ptr;
864
    tb->tc_ptr = tc_ptr;
865
    tb->cs_base = cs_base;
866
    tb->flags = flags;
867
    tb->cflags = cflags;
868
    cpu_gen_code(env, tb, &code_gen_size);
869
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
870

    
871
    /* check next page if needed */
872
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
873
    phys_page2 = -1;
874
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
875
        phys_page2 = get_phys_addr_code(env, virt_page2);
876
    }
877
    tb_link_phys(tb, phys_pc, phys_page2);
878
    return tb;
879
}
880

    
881
/* invalidate all TBs which intersect with the target physical page
   in the range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
886
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
887
                                   int is_cpu_write_access)
888
{
889
    int n, current_tb_modified, current_tb_not_found, current_flags;
890
    CPUState *env = cpu_single_env;
891
    PageDesc *p;
892
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
893
    target_ulong tb_start, tb_end;
894
    target_ulong current_pc, current_cs_base;
895

    
896
    p = page_find(start >> TARGET_PAGE_BITS);
897
    if (!p)
898
        return;
899
    if (!p->code_bitmap &&
900
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
901
        is_cpu_write_access) {
902
        /* build code bitmap */
903
        build_page_bitmap(p);
904
    }
905

    
906
    /* we remove all the TBs in the range [start, end) */
907
    /* XXX: see if in some cases it could be faster to invalidate all the code */
908
    current_tb_not_found = is_cpu_write_access;
909
    current_tb_modified = 0;
910
    current_tb = NULL; /* avoid warning */
911
    current_pc = 0; /* avoid warning */
912
    current_cs_base = 0; /* avoid warning */
913
    current_flags = 0; /* avoid warning */
914
    tb = p->first_tb;
915
    while (tb != NULL) {
916
        n = (long)tb & 3;
917
        tb = (TranslationBlock *)((long)tb & ~3);
918
        tb_next = tb->page_next[n];
919
        /* NOTE: this is subtle as a TB may span two physical pages */
920
        if (n == 0) {
921
            /* NOTE: tb_end may be after the end of the page, but
922
               it is not a problem */
923
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
924
            tb_end = tb_start + tb->size;
925
        } else {
926
            tb_start = tb->page_addr[1];
927
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
928
        }
929
        if (!(tb_end <= start || tb_start >= end)) {
930
#ifdef TARGET_HAS_PRECISE_SMC
931
            if (current_tb_not_found) {
932
                current_tb_not_found = 0;
933
                current_tb = NULL;
934
                if (env->mem_io_pc) {
935
                    /* now we have a real cpu fault */
936
                    current_tb = tb_find_pc(env->mem_io_pc);
937
                }
938
            }
939
            if (current_tb == tb &&
940
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
941
                /* If we are modifying the current TB, we must stop
942
                its execution. We could be more precise by checking
943
                that the modification is after the current PC, but it
944
                would require a specialized function to partially
945
                restore the CPU state */
946

    
947
                current_tb_modified = 1;
948
                cpu_restore_state(current_tb, env,
949
                                  env->mem_io_pc, NULL);
950
#if defined(TARGET_I386)
951
                current_flags = env->hflags;
952
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
953
                current_cs_base = (target_ulong)env->segs[R_CS].base;
954
                current_pc = current_cs_base + env->eip;
955
#else
956
#error unsupported CPU
957
#endif
958
            }
959
#endif /* TARGET_HAS_PRECISE_SMC */
960
            /* we need to do that to handle the case where a signal
961
               occurs while doing tb_phys_invalidate() */
962
            saved_tb = NULL;
963
            if (env) {
964
                saved_tb = env->current_tb;
965
                env->current_tb = NULL;
966
            }
967
            tb_phys_invalidate(tb, -1);
968
            if (env) {
969
                env->current_tb = saved_tb;
970
                if (env->interrupt_request && env->current_tb)
971
                    cpu_interrupt(env, env->interrupt_request);
972
            }
973
        }
974
        tb = tb_next;
975
    }
976
#if !defined(CONFIG_USER_ONLY)
977
    /* if no code remaining, no need to continue to use slow writes */
978
    if (!p->first_tb) {
979
        invalidate_page_bitmap(p);
980
        if (is_cpu_write_access) {
981
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
982
        }
983
    }
984
#endif
985
#ifdef TARGET_HAS_PRECISE_SMC
986
    if (current_tb_modified) {
987
        /* we generate a block containing just the instruction
988
           modifying the memory. It will ensure that it cannot modify
989
           itself */
990
        env->current_tb = NULL;
991
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
992
        cpu_resume_from_signal(env, NULL);
993
    }
994
#endif
995
}
996

    
997
/* len must be <= 8 and start must be a multiple of len */
998
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
999
{
1000
    PageDesc *p;
1001
    int offset, b;
1002
#if 0
1003
    if (1) {
1004
        if (loglevel) {
1005
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1006
                   cpu_single_env->mem_io_vaddr, len,
1007
                   cpu_single_env->eip,
1008
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1009
        }
1010
    }
1011
#endif
1012
    p = page_find(start >> TARGET_PAGE_BITS);
1013
    if (!p)
1014
        return;
1015
    if (p->code_bitmap) {
1016
        offset = start & ~TARGET_PAGE_MASK;
1017
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1018
        if (b & ((1 << len) - 1))
1019
            goto do_invalidate;
1020
    } else {
1021
    do_invalidate:
1022
        tb_invalidate_phys_page_range(start, start + len, 1);
1023
    }
1024
}
1025

    
1026
#if !defined(CONFIG_SOFTMMU)
1027
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1028
                                    unsigned long pc, void *puc)
1029
{
1030
    int n, current_flags, current_tb_modified;
1031
    target_ulong current_pc, current_cs_base;
1032
    PageDesc *p;
1033
    TranslationBlock *tb, *current_tb;
1034
#ifdef TARGET_HAS_PRECISE_SMC
1035
    CPUState *env = cpu_single_env;
1036
#endif
1037

    
1038
    addr &= TARGET_PAGE_MASK;
1039
    p = page_find(addr >> TARGET_PAGE_BITS);
1040
    if (!p)
1041
        return;
1042
    tb = p->first_tb;
1043
    current_tb_modified = 0;
1044
    current_tb = NULL;
1045
    current_pc = 0; /* avoid warning */
1046
    current_cs_base = 0; /* avoid warning */
1047
    current_flags = 0; /* avoid warning */
1048
#ifdef TARGET_HAS_PRECISE_SMC
1049
    if (tb && pc != 0) {
1050
        current_tb = tb_find_pc(pc);
1051
    }
1052
#endif
1053
    while (tb != NULL) {
1054
        n = (long)tb & 3;
1055
        tb = (TranslationBlock *)((long)tb & ~3);
1056
#ifdef TARGET_HAS_PRECISE_SMC
1057
        if (current_tb == tb &&
1058
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1059
                /* If we are modifying the current TB, we must stop
1060
                   its execution. We could be more precise by checking
1061
                   that the modification is after the current PC, but it
1062
                   would require a specialized function to partially
1063
                   restore the CPU state */
1064

    
1065
            current_tb_modified = 1;
1066
            cpu_restore_state(current_tb, env, pc, puc);
1067
#if defined(TARGET_I386)
1068
            current_flags = env->hflags;
1069
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1070
            current_cs_base = (target_ulong)env->segs[R_CS].base;
1071
            current_pc = current_cs_base + env->eip;
1072
#else
1073
#error unsupported CPU
1074
#endif
1075
        }
1076
#endif /* TARGET_HAS_PRECISE_SMC */
1077
        tb_phys_invalidate(tb, addr);
1078
        tb = tb->page_next[n];
1079
    }
1080
    p->first_tb = NULL;
1081
#ifdef TARGET_HAS_PRECISE_SMC
1082
    if (current_tb_modified) {
1083
        /* we generate a block containing just the instruction
1084
           modifying the memory. It will ensure that it cannot modify
1085
           itself */
1086
        env->current_tb = NULL;
1087
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1088
        cpu_resume_from_signal(env, puc);
1089
    }
1090
#endif
1091
}
1092
#endif
1093

    
1094
/* add the tb in the target page and protect it if necessary */
1095
static inline void tb_alloc_page(TranslationBlock *tb,
1096
                                 unsigned int n, target_ulong page_addr)
1097
{
1098
    PageDesc *p;
1099
    TranslationBlock *last_first_tb;
1100

    
1101
    tb->page_addr[n] = page_addr;
1102
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1103
    tb->page_next[n] = p->first_tb;
1104
    last_first_tb = p->first_tb;
1105
    p->first_tb = (TranslationBlock *)((long)tb | n);
1106
    invalidate_page_bitmap(p);
1107

    
1108
#if defined(TARGET_HAS_SMC) || 1
1109

    
1110
#if defined(CONFIG_USER_ONLY)
1111
    if (p->flags & PAGE_WRITE) {
1112
        target_ulong addr;
1113
        PageDesc *p2;
1114
        int prot;
1115

    
1116
        /* force the host page to be non-writable (writes will incur a
           page fault + mprotect overhead) */
1118
        page_addr &= qemu_host_page_mask;
1119
        prot = 0;
1120
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1121
            addr += TARGET_PAGE_SIZE) {
1122

    
1123
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1124
            if (!p2)
1125
                continue;
1126
            prot |= p2->flags;
1127
            p2->flags &= ~PAGE_WRITE;
1128
            page_get_flags(addr);
1129
          }
1130
        mprotect(g2h(page_addr), qemu_host_page_size,
1131
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1132
#ifdef DEBUG_TB_INVALIDATE
1133
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1134
               page_addr);
1135
#endif
1136
    }
1137
#else
1138
    /* if some code is already present, then the pages are already
1139
       protected. So we handle the case where only the first TB is
1140
       allocated in a physical page */
1141
    if (!last_first_tb) {
1142
        tlb_protect_code(page_addr);
1143
    }
1144
#endif
1145

    
1146
#endif /* TARGET_HAS_SMC */
1147
}
1148

    
1149
/* Allocate a new translation block. Flush the translation buffer if
1150
   too many translation blocks or too much generated code. */
1151
TranslationBlock *tb_alloc(target_ulong pc)
1152
{
1153
    TranslationBlock *tb;
1154

    
1155
    if (nb_tbs >= code_gen_max_blocks ||
1156
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1157
        return NULL;
1158
    tb = &tbs[nb_tbs++];
1159
    tb->pc = pc;
1160
    tb->cflags = 0;
1161
    return tb;
1162
}
1163

    
1164
void tb_free(TranslationBlock *tb)
1165
{
1166
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
1169
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1170
        code_gen_ptr = tb->tc_ptr;
1171
        nb_tbs--;
1172
    }
1173
}
1174

    
1175
/* add a new TB and link it to the physical page tables. phys_page2 is
1176
   (-1) to indicate that only one page contains the TB. */
1177
void tb_link_phys(TranslationBlock *tb,
1178
                  target_ulong phys_pc, target_ulong phys_page2)
1179
{
1180
    unsigned int h;
1181
    TranslationBlock **ptb;
1182

    
1183
    /* Grab the mmap lock to stop another thread invalidating this TB
1184
       before we are done.  */
1185
    mmap_lock();
1186
    /* add in the physical hash table */
1187
    h = tb_phys_hash_func(phys_pc);
1188
    ptb = &tb_phys_hash[h];
1189
    tb->phys_hash_next = *ptb;
1190
    *ptb = tb;
1191

    
1192
    /* add in the page list */
1193
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1194
    if (phys_page2 != -1)
1195
        tb_alloc_page(tb, 1, phys_page2);
1196
    else
1197
        tb->page_addr[1] = -1;
1198

    
1199
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1200
    tb->jmp_next[0] = NULL;
1201
    tb->jmp_next[1] = NULL;
1202

    
1203
    /* init original jump addresses */
1204
    if (tb->tb_next_offset[0] != 0xffff)
1205
        tb_reset_jump(tb, 0);
1206
    if (tb->tb_next_offset[1] != 0xffff)
1207
        tb_reset_jump(tb, 1);
1208

    
1209
#ifdef DEBUG_TB_CHECK
1210
    tb_page_check();
1211
#endif
1212
    mmap_unlock();
1213
}
1214

    
1215
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1216
   tb[1].tc_ptr. Return NULL if not found */
1217
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1218
{
1219
    int m_min, m_max, m;
1220
    unsigned long v;
1221
    TranslationBlock *tb;
1222

    
1223
    if (nb_tbs <= 0)
1224
        return NULL;
1225
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1226
        tc_ptr >= (unsigned long)code_gen_ptr)
1227
        return NULL;
1228
    /* binary search (cf Knuth) */
1229
    m_min = 0;
1230
    m_max = nb_tbs - 1;
1231
    while (m_min <= m_max) {
1232
        m = (m_min + m_max) >> 1;
1233
        tb = &tbs[m];
1234
        v = (unsigned long)tb->tc_ptr;
1235
        if (v == tc_ptr)
1236
            return tb;
1237
        else if (tc_ptr < v) {
1238
            m_max = m - 1;
1239
        } else {
1240
            m_min = m + 1;
1241
        }
1242
    }
1243
    return &tbs[m_max];
1244
}
1245

    
1246
static void tb_reset_jump_recursive(TranslationBlock *tb);
1247

    
1248
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1249
{
1250
    TranslationBlock *tb1, *tb_next, **ptb;
1251
    unsigned int n1;
1252

    
1253
    tb1 = tb->jmp_next[n];
1254
    if (tb1 != NULL) {
1255
        /* find head of list */
1256
        for(;;) {
1257
            n1 = (long)tb1 & 3;
1258
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1259
            if (n1 == 2)
1260
                break;
1261
            tb1 = tb1->jmp_next[n1];
1262
        }
1263
        /* we are now sure that tb jumps to tb1 */
1264
        tb_next = tb1;
1265

    
1266
        /* remove tb from the jmp_first list */
1267
        ptb = &tb_next->jmp_first;
1268
        for(;;) {
1269
            tb1 = *ptb;
1270
            n1 = (long)tb1 & 3;
1271
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1272
            if (n1 == n && tb1 == tb)
1273
                break;
1274
            ptb = &tb1->jmp_next[n1];
1275
        }
1276
        *ptb = tb->jmp_next[n];
1277
        tb->jmp_next[n] = NULL;
1278

    
1279
        /* remove the jump to the next tb in the generated code */
1280
        tb_reset_jump(tb, n);
1281

    
1282
        /* recursively remove jumps from the tb we could have jumped to */
1283
        tb_reset_jump_recursive(tb_next);
1284
    }
1285
}
1286

    
1287
static void tb_reset_jump_recursive(TranslationBlock *tb)
1288
{
1289
    tb_reset_jump_recursive2(tb, 0);
1290
    tb_reset_jump_recursive2(tb, 1);
1291
}
1292

    
1293
#if defined(TARGET_HAS_ICE)
1294
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1295
{
1296
    target_phys_addr_t addr;
1297
    target_ulong pd;
1298
    ram_addr_t ram_addr;
1299
    PhysPageDesc *p;
1300

    
1301
    addr = cpu_get_phys_page_debug(env, pc);
1302
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1303
    if (!p) {
1304
        pd = IO_MEM_UNASSIGNED;
1305
    } else {
1306
        pd = p->phys_offset;
1307
    }
1308
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1309
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1310
}
1311
#endif
1312

    
1313
/* Add a watchpoint.  */
1314
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1315
{
1316
    int i;
1317

    
1318
    for (i = 0; i < env->nb_watchpoints; i++) {
1319
        if (addr == env->watchpoint[i].vaddr)
1320
            return 0;
1321
    }
1322
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1323
        return -1;
1324

    
1325
    i = env->nb_watchpoints++;
1326
    env->watchpoint[i].vaddr = addr;
1327
    env->watchpoint[i].type = type;
1328
    tlb_flush_page(env, addr);
1329
    /* FIXME: This flush is needed because of the hack to make memory ops
1330
       terminate the TB.  It can be removed once the proper IO trap and
1331
       re-execute bits are in.  */
1332
    tb_flush(env);
1333
    return i;
1334
}
1335

    
1336
/* Remove a watchpoint.  */
1337
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1338
{
1339
    int i;
1340

    
1341
    for (i = 0; i < env->nb_watchpoints; i++) {
1342
        if (addr == env->watchpoint[i].vaddr) {
1343
            env->nb_watchpoints--;
1344
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1345
            tlb_flush_page(env, addr);
1346
            return 0;
1347
        }
1348
    }
1349
    return -1;
1350
}
1351

    
1352
/* Remove all watchpoints. */
1353
void cpu_watchpoint_remove_all(CPUState *env) {
1354
    int i;
1355

    
1356
    for (i = 0; i < env->nb_watchpoints; i++) {
1357
        tlb_flush_page(env, env->watchpoint[i].vaddr);
1358
    }
1359
    env->nb_watchpoints = 0;
1360
}
1361

    
1362
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1363
   breakpoint is reached */
1364
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1365
{
1366
#if defined(TARGET_HAS_ICE)
1367
    int i;
1368

    
1369
    for(i = 0; i < env->nb_breakpoints; i++) {
1370
        if (env->breakpoints[i] == pc)
1371
            return 0;
1372
    }
1373

    
1374
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1375
        return -1;
1376
    env->breakpoints[env->nb_breakpoints++] = pc;
1377

    
1378
    breakpoint_invalidate(env, pc);
1379
    return 0;
1380
#else
1381
    return -1;
1382
#endif
1383
}
1384

    
1385
/* remove all breakpoints */
1386
void cpu_breakpoint_remove_all(CPUState *env) {
1387
#if defined(TARGET_HAS_ICE)
1388
    int i;
1389
    for(i = 0; i < env->nb_breakpoints; i++) {
1390
        breakpoint_invalidate(env, env->breakpoints[i]);
1391
    }
1392
    env->nb_breakpoints = 0;
1393
#endif
1394
}
1395

    
1396
/* remove a breakpoint */
1397
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1398
{
1399
#if defined(TARGET_HAS_ICE)
1400
    int i;
1401
    for(i = 0; i < env->nb_breakpoints; i++) {
1402
        if (env->breakpoints[i] == pc)
1403
            goto found;
1404
    }
1405
    return -1;
1406
 found:
1407
    env->nb_breakpoints--;
1408
    if (i < env->nb_breakpoints)
1409
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1410

    
1411
    breakpoint_invalidate(env, pc);
1412
    return 0;
1413
#else
1414
    return -1;
1415
#endif
1416
}
1417

    
1418
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1419
   CPU loop after each instruction */
1420
void cpu_single_step(CPUState *env, int enabled)
1421
{
1422
#if defined(TARGET_HAS_ICE)
1423
    if (env->singlestep_enabled != enabled) {
1424
        env->singlestep_enabled = enabled;
1425
        /* must flush all the translated code to avoid inconsistencies */
1426
        /* XXX: only flush what is necessary */
1427
        tb_flush(env);
1428
    }
1429
#endif
1430
}
1431

    
1432
/* enable or disable low-level logging */
1433
void cpu_set_log(int log_flags)
1434
{
1435
    loglevel = log_flags;
1436
    if (loglevel && !logfile) {
1437
        logfile = fopen(logfilename, log_append ? "a" : "w");
1438
        if (!logfile) {
1439
            perror(logfilename);
1440
            _exit(1);
1441
        }
1442
#if !defined(CONFIG_SOFTMMU)
1443
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1444
        {
1445
            static char logfile_buf[4096];
1446
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1447
        }
1448
#else
1449
        setvbuf(logfile, NULL, _IOLBF, 0);
1450
#endif
1451
        log_append = 1;
1452
    }
1453
    if (!loglevel && logfile) {
1454
        fclose(logfile);
1455
        logfile = NULL;
1456
    }
1457
}
1458

    
1459
void cpu_set_log_filename(const char *filename)
1460
{
1461
    logfilename = strdup(filename);
1462
    if (logfile) {
1463
        fclose(logfile);
1464
        logfile = NULL;
1465
    }
1466
    cpu_set_log(loglevel);
1467
}
1468

    
1469
/* mask must never be zero, except for A20 change call */
1470
void cpu_interrupt(CPUState *env, int mask)
1471
{
1472
#if !defined(USE_NPTL)
1473
    TranslationBlock *tb;
1474
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1475
#endif
1476
    int old_mask;
1477

    
1478
    old_mask = env->interrupt_request;
1479
    /* FIXME: This is probably not threadsafe.  A different thread could
1480
       be in the middle of a read-modify-write operation.  */
1481
    env->interrupt_request |= mask;
1482
#if defined(USE_NPTL)
1483
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1484
       problem and hope the cpu will stop of its own accord.  For userspace
1485
       emulation this often isn't actually as bad as it sounds.  Often
1486
       signals are used primarily to interrupt blocking syscalls.  */
1487
#else
1488
    if (use_icount) {
1489
        env->icount_decr.u16.high = 0xffff;
1490
#ifndef CONFIG_USER_ONLY
1491
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1492
           an async event happened and we need to process it.  */
1493
        if (!can_do_io(env)
1494
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1495
            cpu_abort(env, "Raised interrupt while not in I/O function");
1496
        }
1497
#endif
1498
    } else {
1499
        tb = env->current_tb;
1500
        /* if the cpu is currently executing code, we must unlink it and
1501
           all the potentially executing TB */
1502
        if (tb && !testandset(&interrupt_lock)) {
1503
            env->current_tb = NULL;
1504
            tb_reset_jump_recursive(tb);
1505
            resetlock(&interrupt_lock);
1506
        }
1507
    }
1508
#endif
1509
}
1510

    
1511
void cpu_reset_interrupt(CPUState *env, int mask)
1512
{
1513
    env->interrupt_request &= ~mask;
1514
}
1515

    
1516
const CPULogItem cpu_log_items[] = {
1517
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1518
      "show generated host assembly code for each compiled TB" },
1519
    { CPU_LOG_TB_IN_ASM, "in_asm",
1520
      "show target assembly code for each compiled TB" },
1521
    { CPU_LOG_TB_OP, "op",
1522
      "show micro ops for each compiled TB" },
1523
    { CPU_LOG_TB_OP_OPT, "op_opt",
1524
      "show micro ops "
1525
#ifdef TARGET_I386
1526
      "before eflags optimization and "
1527
#endif
1528
      "after liveness analysis" },
1529
    { CPU_LOG_INT, "int",
1530
      "show interrupts/exceptions in short format" },
1531
    { CPU_LOG_EXEC, "exec",
1532
      "show trace before each executed TB (lots of logs)" },
1533
    { CPU_LOG_TB_CPU, "cpu",
1534
      "show CPU state before block translation" },
1535
#ifdef TARGET_I386
1536
    { CPU_LOG_PCALL, "pcall",
1537
      "show protected mode far calls/returns/exceptions" },
1538
#endif
1539
#ifdef DEBUG_IOPORT
1540
    { CPU_LOG_IOPORT, "ioport",
1541
      "show all i/o ports accesses" },
1542
#endif
1543
    { 0, NULL, NULL },
1544
};
1545

    
1546
static int cmp1(const char *s1, int n, const char *s2)
1547
{
1548
    if (strlen(s2) != n)
1549
        return 0;
1550
    return memcmp(s1, s2, n) == 0;
1551
}
1552

    
1553
/* takes a comma-separated list of log masks. Returns 0 on error. */
1554
int cpu_str_to_log_mask(const char *str)
1555
{
1556
    const CPULogItem *item;
1557
    int mask;
1558
    const char *p, *p1;
1559

    
1560
    p = str;
1561
    mask = 0;
1562
    for(;;) {
1563
        p1 = strchr(p, ',');
1564
        if (!p1)
1565
            p1 = p + strlen(p);
1566
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
1577
    found:
1578
        mask |= item->mask;
1579
        if (*p1 != ',')
1580
            break;
1581
        p = p1 + 1;
1582
    }
1583
    return mask;
1584
}
1585

    
1586
void cpu_abort(CPUState *env, const char *fmt, ...)
1587
{
1588
    va_list ap;
1589
    va_list ap2;
1590

    
1591
    va_start(ap, fmt);
1592
    va_copy(ap2, ap);
1593
    fprintf(stderr, "qemu: fatal: ");
1594
    vfprintf(stderr, fmt, ap);
1595
    fprintf(stderr, "\n");
1596
#ifdef TARGET_I386
1597
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1598
#else
1599
    cpu_dump_state(env, stderr, fprintf, 0);
1600
#endif
1601
    if (logfile) {
1602
        fprintf(logfile, "qemu: fatal: ");
1603
        vfprintf(logfile, fmt, ap2);
1604
        fprintf(logfile, "\n");
1605
#ifdef TARGET_I386
1606
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1607
#else
1608
        cpu_dump_state(env, logfile, fprintf, 0);
1609
#endif
1610
        fflush(logfile);
1611
        fclose(logfile);
1612
    }
1613
    va_end(ap2);
1614
    va_end(ap);
1615
    abort();
1616
}
1617

    
1618
CPUState *cpu_copy(CPUState *env)
1619
{
1620
    CPUState *new_env = cpu_init(env->cpu_model_str);
1621
    /* preserve chaining and index */
1622
    CPUState *next_cpu = new_env->next_cpu;
1623
    int cpu_index = new_env->cpu_index;
1624
    memcpy(new_env, env, sizeof(CPUState));
1625
    new_env->next_cpu = next_cpu;
1626
    new_env->cpu_index = cpu_index;
1627
    return new_env;
1628
}
1629

    
1630
#if !defined(CONFIG_USER_ONLY)
1631

    
1632
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1633
{
1634
    unsigned int i;
1635

    
1636
    /* Discard jump cache entries for any tb which might potentially
1637
       overlap the flushed page.  */
1638
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1639
    memset (&env->tb_jmp_cache[i], 0, 
1640
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1641

    
1642
    i = tb_jmp_cache_hash_page(addr);
1643
    memset (&env->tb_jmp_cache[i], 0, 
1644
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1645
}
1646

    
1647
/* NOTE: if flush_global is true, also flush global entries (not
1648
   implemented yet) */
1649
void tlb_flush(CPUState *env, int flush_global)
1650
{
1651
    int i;
1652

    
1653
#if defined(DEBUG_TLB)
1654
    printf("tlb_flush:\n");
1655
#endif
1656
    /* must reset current TB so that interrupts cannot modify the
1657
       links while we are modifying them */
1658
    env->current_tb = NULL;
1659

    
1660
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1661
        env->tlb_table[0][i].addr_read = -1;
1662
        env->tlb_table[0][i].addr_write = -1;
1663
        env->tlb_table[0][i].addr_code = -1;
1664
        env->tlb_table[1][i].addr_read = -1;
1665
        env->tlb_table[1][i].addr_write = -1;
1666
        env->tlb_table[1][i].addr_code = -1;
1667
#if (NB_MMU_MODES >= 3)
1668
        env->tlb_table[2][i].addr_read = -1;
1669
        env->tlb_table[2][i].addr_write = -1;
1670
        env->tlb_table[2][i].addr_code = -1;
1671
#if (NB_MMU_MODES == 4)
1672
        env->tlb_table[3][i].addr_read = -1;
1673
        env->tlb_table[3][i].addr_write = -1;
1674
        env->tlb_table[3][i].addr_code = -1;
1675
#endif
1676
#endif
1677
    }
1678

    
1679
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1680

    
1681
#ifdef USE_KQEMU
1682
    if (env->kqemu_enabled) {
1683
        kqemu_flush(env, flush_global);
1684
    }
1685
#endif
1686
    tlb_flush_count++;
1687
}
1688

    
1689
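/* invalidate a single TLB entry if it maps the given page-aligned
   virtual address for read, write or code access */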
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1690
{
1691
    if (addr == (tlb_entry->addr_read &
1692
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1693
        addr == (tlb_entry->addr_write &
1694
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1695
        addr == (tlb_entry->addr_code &
1696
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1697
        tlb_entry->addr_read = -1;
1698
        tlb_entry->addr_write = -1;
1699
        tlb_entry->addr_code = -1;
1700
    }
1701
}
1702

    
1703
void tlb_flush_page(CPUState *env, target_ulong addr)
1704
{
1705
    int i;
1706

    
1707
#if defined(DEBUG_TLB)
1708
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1709
#endif
1710
    /* must reset current TB so that interrupts cannot modify the
1711
       links while we are modifying them */
1712
    env->current_tb = NULL;
1713

    
1714
    addr &= TARGET_PAGE_MASK;
1715
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1716
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1717
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1718
#if (NB_MMU_MODES >= 3)
1719
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1720
#if (NB_MMU_MODES == 4)
1721
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1722
#endif
1723
#endif
1724

    
1725
    tlb_flush_jmp_cache(env, addr);
1726

    
1727
#ifdef USE_KQEMU
1728
    if (env->kqemu_enabled) {
1729
        kqemu_flush_page(env, addr);
1730
    }
1731
#endif
1732
}
1733

    
1734
/* update the TLBs so that writes to code in the virtual page 'addr'
1735
   can be detected */
1736
static void tlb_protect_code(ram_addr_t ram_addr)
1737
{
1738
    cpu_physical_memory_reset_dirty(ram_addr,
1739
                                    ram_addr + TARGET_PAGE_SIZE,
1740
                                    CODE_DIRTY_FLAG);
1741
}
1742

    
1743
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1744
   tested for self modifying code */
1745
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1746
                                    target_ulong vaddr)
1747
{
1748
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1749
}
1750

    
1751
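/* if the entry maps RAM inside [start, start + length), set TLB_NOTDIRTY
   so that the next write takes the slow path and the dirty bit can be
   set again */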
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1752
                                         unsigned long start, unsigned long length)
1753
{
1754
    unsigned long addr;
1755
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1756
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1757
        if ((addr - start) < length) {
1758
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1759
        }
1760
    }
1761
}
1762

    
1763
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1764
                                     int dirty_flags)
1765
{
1766
    CPUState *env;
1767
    unsigned long length, start1;
1768
    int i, mask, len;
1769
    uint8_t *p;
1770

    
1771
    start &= TARGET_PAGE_MASK;
1772
    end = TARGET_PAGE_ALIGN(end);
1773

    
1774
    length = end - start;
1775
    if (length == 0)
1776
        return;
1777
    len = length >> TARGET_PAGE_BITS;
1778
#ifdef USE_KQEMU
1779
    /* XXX: should not depend on cpu context */
1780
    env = first_cpu;
1781
    if (env->kqemu_enabled) {
1782
        ram_addr_t addr;
1783
        addr = start;
1784
        for(i = 0; i < len; i++) {
1785
            kqemu_set_notdirty(env, addr);
1786
            addr += TARGET_PAGE_SIZE;
1787
        }
1788
    }
1789
#endif
1790
    mask = ~dirty_flags;
1791
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1792
    for(i = 0; i < len; i++)
1793
        p[i] &= mask;
1794

    
1795
    /* we modify the TLB cache so that the dirty bit will be set again
1796
       when accessing the range */
1797
    start1 = start + (unsigned long)phys_ram_base;
1798
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1799
        for(i = 0; i < CPU_TLB_SIZE; i++)
1800
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1801
        for(i = 0; i < CPU_TLB_SIZE; i++)
1802
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1803
#if (NB_MMU_MODES >= 3)
1804
        for(i = 0; i < CPU_TLB_SIZE; i++)
1805
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1806
#if (NB_MMU_MODES == 4)
1807
        for(i = 0; i < CPU_TLB_SIZE; i++)
1808
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1809
#endif
1810
#endif
1811
    }
1812
}
1813

    
1814
int cpu_physical_memory_set_dirty_tracking(int enable)
1815
{
1816
    in_migration = enable;
1817
    return 0;
1818
}
1819

    
1820
int cpu_physical_memory_get_dirty_tracking(void)
1821
{
1822
    return in_migration;
1823
}
1824

    
1825
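/* re-check the dirty bitmap for a RAM-backed TLB entry and make writes
   trap again (TLB_NOTDIRTY) if the page is no longer dirty */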
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1826
{
1827
    ram_addr_t ram_addr;
1828

    
1829
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1830
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1831
            tlb_entry->addend - (unsigned long)phys_ram_base;
1832
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1833
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1834
        }
1835
    }
1836
}
1837

    
1838
/* update the TLB according to the current state of the dirty bits */
1839
void cpu_tlb_update_dirty(CPUState *env)
1840
{
1841
    int i;
1842
    for(i = 0; i < CPU_TLB_SIZE; i++)
1843
        tlb_update_dirty(&env->tlb_table[0][i]);
1844
    for(i = 0; i < CPU_TLB_SIZE; i++)
1845
        tlb_update_dirty(&env->tlb_table[1][i]);
1846
#if (NB_MMU_MODES >= 3)
1847
    for(i = 0; i < CPU_TLB_SIZE; i++)
1848
        tlb_update_dirty(&env->tlb_table[2][i]);
1849
#if (NB_MMU_MODES == 4)
1850
    for(i = 0; i < CPU_TLB_SIZE; i++)
1851
        tlb_update_dirty(&env->tlb_table[3][i]);
1852
#endif
1853
#endif
1854
}
1855

    
1856
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1857
{
1858
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1859
        tlb_entry->addr_write = vaddr;
1860
}
1861

    
1862
/* update the TLB corresponding to virtual page vaddr
1863
   so that it is no longer dirty */
1864
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1865
{
1866
    int i;
1867

    
1868
    vaddr &= TARGET_PAGE_MASK;
1869
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1870
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1871
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1872
#if (NB_MMU_MODES >= 3)
1873
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1874
#if (NB_MMU_MODES == 4)
1875
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1876
#endif
1877
#endif
1878
}
1879

    
1880
/* add a new TLB entry. At most one entry for a given virtual address
1881
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1882
   (can only happen in non SOFTMMU mode for I/O pages or pages
1883
   conflicting with the host address space). */
1884
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1885
                      target_phys_addr_t paddr, int prot,
1886
                      int mmu_idx, int is_softmmu)
1887
{
1888
    PhysPageDesc *p;
1889
    unsigned long pd;
1890
    unsigned int index;
1891
    target_ulong address;
1892
    target_ulong code_address;
1893
    target_phys_addr_t addend;
1894
    int ret;
1895
    CPUTLBEntry *te;
1896
    int i;
1897
    target_phys_addr_t iotlb;
1898

    
1899
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1900
    if (!p) {
1901
        pd = IO_MEM_UNASSIGNED;
1902
    } else {
1903
        pd = p->phys_offset;
1904
    }
1905
#if defined(DEBUG_TLB)
1906
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1907
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1908
#endif
1909

    
1910
    ret = 0;
1911
    address = vaddr;
1912
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1913
        /* IO memory case (romd handled later) */
1914
        address |= TLB_MMIO;
1915
    }
1916
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1917
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1918
        /* Normal RAM.  */
1919
        iotlb = pd & TARGET_PAGE_MASK;
1920
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1921
            iotlb |= IO_MEM_NOTDIRTY;
1922
        else
1923
            iotlb |= IO_MEM_ROM;
1924
    } else {
1925
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
1931
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1932
    }
1933

    
1934
    code_address = address;
1935
    /* Make accesses to pages with watchpoints go via the
1936
       watchpoint trap routines.  */
1937
    for (i = 0; i < env->nb_watchpoints; i++) {
1938
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1939
            iotlb = io_mem_watch + paddr;
1940
            /* TODO: The memory case can be optimized by not trapping
1941
               reads of pages with a write breakpoint.  */
1942
            address |= TLB_MMIO;
1943
        }
1944
    }
1945

    
1946
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1947
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
1948
    te = &env->tlb_table[mmu_idx][index];
1949
    te->addend = addend - vaddr;
1950
    if (prot & PAGE_READ) {
1951
        te->addr_read = address;
1952
    } else {
1953
        te->addr_read = -1;
1954
    }
1955

    
1956
    if (prot & PAGE_EXEC) {
1957
        te->addr_code = code_address;
1958
    } else {
1959
        te->addr_code = -1;
1960
    }
1961
    if (prot & PAGE_WRITE) {
1962
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1963
            (pd & IO_MEM_ROMD)) {
1964
            /* Write access calls the I/O callback.  */
1965
            te->addr_write = address | TLB_MMIO;
1966
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1967
                   !cpu_physical_memory_is_dirty(pd)) {
1968
            te->addr_write = address | TLB_NOTDIRTY;
1969
        } else {
1970
            te->addr_write = address;
1971
        }
1972
    } else {
1973
        te->addr_write = -1;
1974
    }
1975
    return ret;
1976
}
1977

    
1978
#else
1979

    
1980
void tlb_flush(CPUState *env, int flush_global)
1981
{
1982
}
1983

    
1984
void tlb_flush_page(CPUState *env, target_ulong addr)
1985
{
1986
}
1987

    
1988
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1989
                      target_phys_addr_t paddr, int prot,
1990
                      int mmu_idx, int is_softmmu)
1991
{
1992
    return 0;
1993
}
1994

    
1995
/* dump memory mappings */
1996
void page_dump(FILE *f)
1997
{
1998
    unsigned long start, end;
1999
    int i, j, prot, prot1;
2000
    PageDesc *p;
2001

    
2002
    fprintf(f, "%-8s %-8s %-8s %s\n",
2003
            "start", "end", "size", "prot");
2004
    start = -1;
2005
    end = -1;
2006
    prot = 0;
2007
    for(i = 0; i <= L1_SIZE; i++) {
2008
        if (i < L1_SIZE)
2009
            p = l1_map[i];
2010
        else
2011
            p = NULL;
2012
        for(j = 0;j < L2_SIZE; j++) {
2013
            if (!p)
2014
                prot1 = 0;
2015
            else
2016
                prot1 = p[j].flags;
2017
            if (prot1 != prot) {
2018
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2019
                if (start != -1) {
2020
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2021
                            start, end, end - start,
2022
                            prot & PAGE_READ ? 'r' : '-',
2023
                            prot & PAGE_WRITE ? 'w' : '-',
2024
                            prot & PAGE_EXEC ? 'x' : '-');
2025
                }
2026
                if (prot1 != 0)
2027
                    start = end;
2028
                else
2029
                    start = -1;
2030
                prot = prot1;
2031
            }
2032
            if (!p)
2033
                break;
2034
        }
2035
    }
2036
}
2037

    
2038
int page_get_flags(target_ulong address)
2039
{
2040
    PageDesc *p;
2041

    
2042
    p = page_find(address >> TARGET_PAGE_BITS);
2043
    if (!p)
2044
        return 0;
2045
    return p->flags;
2046
}
2047

    
2048
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
2051
void page_set_flags(target_ulong start, target_ulong end, int flags)
2052
{
2053
    PageDesc *p;
2054
    target_ulong addr;
2055

    
2056
    /* mmap_lock should already be held.  */
2057
    start = start & TARGET_PAGE_MASK;
2058
    end = TARGET_PAGE_ALIGN(end);
2059
    if (flags & PAGE_WRITE)
2060
        flags |= PAGE_WRITE_ORG;
2061
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2062
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2063
        /* We may be called for host regions that are outside guest
2064
           address space.  */
2065
        if (!p)
2066
            return;
2067
        /* if the write protection is set, then we invalidate the code
2068
           inside */
2069
        if (!(p->flags & PAGE_WRITE) &&
2070
            (flags & PAGE_WRITE) &&
2071
            p->first_tb) {
2072
            tb_invalidate_phys_page(addr, 0, NULL);
2073
        }
2074
        p->flags = flags;
2075
    }
2076
}
2077

    
2078
int page_check_range(target_ulong start, target_ulong len, int flags)
2079
{
2080
    PageDesc *p;
2081
    target_ulong end;
2082
    target_ulong addr;
2083

    
2084
    if (start + len < start)
2085
        /* we've wrapped around */
2086
        return -1;
2087

    
2088
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2089
    start = start & TARGET_PAGE_MASK;
2090

    
2091
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2092
        p = page_find(addr >> TARGET_PAGE_BITS);
2093
        if( !p )
2094
            return -1;
2095
        if( !(p->flags & PAGE_VALID) )
2096
            return -1;
2097

    
2098
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2099
            return -1;
2100
        if (flags & PAGE_WRITE) {
2101
            if (!(p->flags & PAGE_WRITE_ORG))
2102
                return -1;
2103
            /* unprotect the page if it was put read-only because it
2104
               contains translated code */
2105
            if (!(p->flags & PAGE_WRITE)) {
2106
                if (!page_unprotect(addr, 0, NULL))
2107
                    return -1;
2108
            }
2109
            return 0;
2110
        }
2111
    }
2112
    return 0;
2113
}
2114

    
2115
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2117
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2118
{
2119
    unsigned int page_index, prot, pindex;
2120
    PageDesc *p, *p1;
2121
    target_ulong host_start, host_end, addr;
2122

    
2123
    /* Technically this isn't safe inside a signal handler.  However we
2124
       know this only ever happens in a synchronous SEGV handler, so in
2125
       practice it seems to be ok.  */
2126
    mmap_lock();
2127

    
2128
    host_start = address & qemu_host_page_mask;
2129
    page_index = host_start >> TARGET_PAGE_BITS;
2130
    p1 = page_find(page_index);
2131
    if (!p1) {
2132
        mmap_unlock();
2133
        return 0;
2134
    }
2135
    host_end = host_start + qemu_host_page_size;
2136
    p = p1;
2137
    prot = 0;
2138
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2139
        prot |= p->flags;
2140
        p++;
2141
    }
2142
    /* if the page was really writable, then we change its
2143
       protection back to writable */
2144
    if (prot & PAGE_WRITE_ORG) {
2145
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2146
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2147
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2148
                     (prot & PAGE_BITS) | PAGE_WRITE);
2149
            p1[pindex].flags |= PAGE_WRITE;
2150
            /* and since the content will be modified, we must invalidate
2151
               the corresponding translated code. */
2152
            tb_invalidate_phys_page(address, pc, puc);
2153
#ifdef DEBUG_TB_CHECK
2154
            tb_invalidate_check(address);
2155
#endif
2156
            mmap_unlock();
2157
            return 1;
2158
        }
2159
    }
2160
    mmap_unlock();
2161
    return 0;
2162
}
2163

    
2164
static inline void tlb_set_dirty(CPUState *env,
2165
                                 unsigned long addr, target_ulong vaddr)
2166
{
2167
}
2168
#endif /* defined(CONFIG_USER_ONLY) */
2169

    
2170
#if !defined(CONFIG_USER_ONLY)
2171
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2172
                             ram_addr_t memory);
2173
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2174
                           ram_addr_t orig_memory);
2175
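/* compute the start/end offsets within the target page containing 'addr'
   that are covered by [start_addr, start_addr + orig_size), and set
   need_subpage if the mapping does not cover the whole page */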
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2176
                      need_subpage)                                     \
2177
    do {                                                                \
2178
        if (addr > start_addr)                                          \
2179
            start_addr2 = 0;                                            \
2180
        else {                                                          \
2181
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2182
            if (start_addr2 > 0)                                        \
2183
                need_subpage = 1;                                       \
2184
        }                                                               \
2185
                                                                        \
2186
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2187
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2188
        else {                                                          \
2189
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2190
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2191
                need_subpage = 1;                                       \
2192
        }                                                               \
2193
    } while (0)
2194

    
2195
/* register physical memory. 'size' must be a multiple of the target
2196
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2197
   io memory page */
2198
void cpu_register_physical_memory(target_phys_addr_t start_addr,
2199
                                  ram_addr_t size,
2200
                                  ram_addr_t phys_offset)
2201
{
2202
    target_phys_addr_t addr, end_addr;
2203
    PhysPageDesc *p;
2204
    CPUState *env;
2205
    ram_addr_t orig_size = size;
2206
    void *subpage;
2207

    
2208
#ifdef USE_KQEMU
2209
    /* XXX: should not depend on cpu context */
2210
    env = first_cpu;
2211
    if (env->kqemu_enabled) {
2212
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2213
    }
2214
#endif
2215
    if (kvm_enabled())
2216
        kvm_set_phys_mem(start_addr, size, phys_offset);
2217

    
2218
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2219
    end_addr = start_addr + (target_phys_addr_t)size;
2220
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2221
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2222
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2223
            ram_addr_t orig_memory = p->phys_offset;
2224
            target_phys_addr_t start_addr2, end_addr2;
2225
            int need_subpage = 0;
2226

    
2227
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2228
                          need_subpage);
2229
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2230
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2231
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2232
                                           &p->phys_offset, orig_memory);
2233
                } else {
2234
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2235
                                            >> IO_MEM_SHIFT];
2236
                }
2237
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2238
            } else {
2239
                p->phys_offset = phys_offset;
2240
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2241
                    (phys_offset & IO_MEM_ROMD))
2242
                    phys_offset += TARGET_PAGE_SIZE;
2243
            }
2244
        } else {
2245
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2246
            p->phys_offset = phys_offset;
2247
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2248
                (phys_offset & IO_MEM_ROMD))
2249
                phys_offset += TARGET_PAGE_SIZE;
2250
            else {
2251
                target_phys_addr_t start_addr2, end_addr2;
2252
                int need_subpage = 0;
2253

    
2254
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2255
                              end_addr2, need_subpage);
2256

    
2257
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2258
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2259
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2260
                    subpage_register(subpage, start_addr2, end_addr2,
2261
                                     phys_offset);
2262
                }
2263
            }
2264
        }
2265
    }
2266

    
2267
    /* since each CPU stores ram addresses in its TLB cache, we must
2268
       reset the modified entries */
2269
    /* XXX: slow ! */
2270
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2271
        tlb_flush(env, 1);
2272
    }
2273
}
2274

    
2275
/* XXX: temporary until new memory mapping API */
2276
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2277
{
2278
    PhysPageDesc *p;
2279

    
2280
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2281
    if (!p)
2282
        return IO_MEM_UNASSIGNED;
2283
    return p->phys_offset;
2284
}
2285

    
2286
/* XXX: better than nothing */
2287
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2288
{
2289
    ram_addr_t addr;
2290
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2291
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2292
                (uint64_t)size, (uint64_t)phys_ram_size);
2293
        abort();
2294
    }
2295
    addr = phys_ram_alloc_offset;
2296
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2297
    return addr;
2298
}
2299

    
2300
void qemu_ram_free(ram_addr_t addr)
2301
{
2302
}
2303

    
2304
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2305
{
2306
#ifdef DEBUG_UNASSIGNED
2307
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2308
#endif
2309
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2310
    do_unassigned_access(addr, 0, 0, 0, 1);
2311
#endif
2312
    return 0;
2313
}
2314

    
2315
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2316
{
2317
#ifdef DEBUG_UNASSIGNED
2318
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2319
#endif
2320
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2321
    do_unassigned_access(addr, 0, 0, 0, 2);
2322
#endif
2323
    return 0;
2324
}
2325

    
2326
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2327
{
2328
#ifdef DEBUG_UNASSIGNED
2329
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2330
#endif
2331
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2332
    do_unassigned_access(addr, 0, 0, 0, 4);
2333
#endif
2334
    return 0;
2335
}
2336

    
2337
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2338
{
2339
#ifdef DEBUG_UNASSIGNED
2340
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2341
#endif
2342
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2343
    do_unassigned_access(addr, 1, 0, 0, 1);
2344
#endif
2345
}
2346

    
2347
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2348
{
2349
#ifdef DEBUG_UNASSIGNED
2350
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2351
#endif
2352
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2353
    do_unassigned_access(addr, 1, 0, 0, 2);
2354
#endif
2355
}
2356

    
2357
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2358
{
2359
#ifdef DEBUG_UNASSIGNED
2360
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2361
#endif
2362
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2363
    do_unassigned_access(addr, 1, 0, 0, 4);
2364
#endif
2365
}
2366

    
2367
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2368
    unassigned_mem_readb,
2369
    unassigned_mem_readw,
2370
    unassigned_mem_readl,
2371
};
2372

    
2373
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2374
    unassigned_mem_writeb,
2375
    unassigned_mem_writew,
2376
    unassigned_mem_writel,
2377
};
2378

    
2379
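/* write handlers for RAM pages with dirty tracking: invalidate any
   translated code on the page, perform the store, update the dirty
   flags and re-enable direct writes once the page is fully dirty */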
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2380
                                uint32_t val)
2381
{
2382
    int dirty_flags;
2383
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2384
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2385
#if !defined(CONFIG_USER_ONLY)
2386
        tb_invalidate_phys_page_fast(ram_addr, 1);
2387
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2388
#endif
2389
    }
2390
    stb_p(phys_ram_base + ram_addr, val);
2391
#ifdef USE_KQEMU
2392
    if (cpu_single_env->kqemu_enabled &&
2393
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2394
        kqemu_modify_page(cpu_single_env, ram_addr);
2395
#endif
2396
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2397
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2398
    /* we remove the notdirty callback only if the code has been
2399
       flushed */
2400
    if (dirty_flags == 0xff)
2401
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2402
}
2403

    
2404
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2405
                                uint32_t val)
2406
{
2407
    int dirty_flags;
2408
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2409
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2410
#if !defined(CONFIG_USER_ONLY)
2411
        tb_invalidate_phys_page_fast(ram_addr, 2);
2412
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2413
#endif
2414
    }
2415
    stw_p(phys_ram_base + ram_addr, val);
2416
#ifdef USE_KQEMU
2417
    if (cpu_single_env->kqemu_enabled &&
2418
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2419
        kqemu_modify_page(cpu_single_env, ram_addr);
2420
#endif
2421
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2422
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2423
    /* we remove the notdirty callback only if the code has been
2424
       flushed */
2425
    if (dirty_flags == 0xff)
2426
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2427
}
2428

    
2429
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2430
                                uint32_t val)
2431
{
2432
    int dirty_flags;
2433
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2434
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2435
#if !defined(CONFIG_USER_ONLY)
2436
        tb_invalidate_phys_page_fast(ram_addr, 4);
2437
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2438
#endif
2439
    }
2440
    stl_p(phys_ram_base + ram_addr, val);
2441
#ifdef USE_KQEMU
2442
    if (cpu_single_env->kqemu_enabled &&
2443
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2444
        kqemu_modify_page(cpu_single_env, ram_addr);
2445
#endif
2446
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2447
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2448
    /* we remove the notdirty callback only if the code has been
2449
       flushed */
2450
    if (dirty_flags == 0xff)
2451
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2452
}
2453

    
2454
static CPUReadMemoryFunc *error_mem_read[3] = {
2455
    NULL, /* never used */
2456
    NULL, /* never used */
2457
    NULL, /* never used */
2458
};
2459

    
2460
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2461
    notdirty_mem_writeb,
2462
    notdirty_mem_writew,
2463
    notdirty_mem_writel,
2464
};
2465

    
2466
/* Generate a debug exception if a watchpoint has been hit.  */
2467
static void check_watchpoint(int offset, int flags)
2468
{
2469
    CPUState *env = cpu_single_env;
2470
    target_ulong vaddr;
2471
    int i;
2472

    
2473
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2474
    for (i = 0; i < env->nb_watchpoints; i++) {
2475
        if (vaddr == env->watchpoint[i].vaddr
2476
                && (env->watchpoint[i].type & flags)) {
2477
            env->watchpoint_hit = i + 1;
2478
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2479
            break;
2480
        }
2481
    }
2482
}
2483

    
2484
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2485
   so these check for a hit then pass through to the normal out-of-line
2486
   phys routines.  */
2487
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2488
{
2489
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2490
    return ldub_phys(addr);
2491
}
2492

    
2493
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2494
{
2495
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2496
    return lduw_phys(addr);
2497
}
2498

    
2499
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2500
{
2501
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2502
    return ldl_phys(addr);
2503
}
2504

    
2505
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2506
                             uint32_t val)
2507
{
2508
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2509
    stb_phys(addr, val);
2510
}
2511

    
2512
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2513
                             uint32_t val)
2514
{
2515
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2516
    stw_phys(addr, val);
2517
}
2518

    
2519
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2520
                             uint32_t val)
2521
{
2522
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2523
    stl_phys(addr, val);
2524
}
2525

    
2526
static CPUReadMemoryFunc *watch_mem_read[3] = {
2527
    watch_mem_readb,
2528
    watch_mem_readw,
2529
    watch_mem_readl,
2530
};
2531

    
2532
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2533
    watch_mem_writeb,
2534
    watch_mem_writew,
2535
    watch_mem_writel,
2536
};
2537

    
2538
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2539
                                 unsigned int len)
2540
{
2541
    uint32_t ret;
2542
    unsigned int idx;
2543

    
2544
    idx = SUBPAGE_IDX(addr - mmio->base);
2545
#if defined(DEBUG_SUBPAGE)
2546
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2547
           mmio, len, addr, idx);
2548
#endif
2549
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2550

    
2551
    return ret;
2552
}
2553

    
2554
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2555
                              uint32_t value, unsigned int len)
2556
{
2557
    unsigned int idx;
2558

    
2559
    idx = SUBPAGE_IDX(addr - mmio->base);
2560
#if defined(DEBUG_SUBPAGE)
2561
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2562
           mmio, len, addr, idx, value);
2563
#endif
2564
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2565
}
2566

    
2567
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2568
{
2569
#if defined(DEBUG_SUBPAGE)
2570
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2571
#endif
2572

    
2573
    return subpage_readlen(opaque, addr, 0);
2574
}
2575

    
2576
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2577
                            uint32_t value)
2578
{
2579
#if defined(DEBUG_SUBPAGE)
2580
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2581
#endif
2582
    subpage_writelen(opaque, addr, value, 0);
2583
}
2584

    
2585
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2586
{
2587
#if defined(DEBUG_SUBPAGE)
2588
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2589
#endif
2590

    
2591
    return subpage_readlen(opaque, addr, 1);
2592
}
2593

    
2594
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2595
                            uint32_t value)
2596
{
2597
#if defined(DEBUG_SUBPAGE)
2598
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2599
#endif
2600
    subpage_writelen(opaque, addr, value, 1);
2601
}
2602

    
2603
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2604
{
2605
#if defined(DEBUG_SUBPAGE)
2606
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2607
#endif
2608

    
2609
    return subpage_readlen(opaque, addr, 2);
2610
}
2611

    
2612
static void subpage_writel (void *opaque,
2613
                         target_phys_addr_t addr, uint32_t value)
2614
{
2615
#if defined(DEBUG_SUBPAGE)
2616
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2617
#endif
2618
    subpage_writelen(opaque, addr, value, 2);
2619
}
2620

    
2621
static CPUReadMemoryFunc *subpage_read[] = {
2622
    &subpage_readb,
2623
    &subpage_readw,
2624
    &subpage_readl,
2625
};
2626

    
2627
static CPUWriteMemoryFunc *subpage_write[] = {
2628
    &subpage_writeb,
2629
    &subpage_writew,
2630
    &subpage_writel,
2631
};
2632

    
2633
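/* route accesses to the subpage offsets [start, end] of 'mmio' to the
   I/O handlers registered for 'memory' */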
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2634
                             ram_addr_t memory)
2635
{
2636
    int idx, eidx;
2637
    unsigned int i;
2638

    
2639
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2640
        return -1;
2641
    idx = SUBPAGE_IDX(start);
2642
    eidx = SUBPAGE_IDX(end);
2643
#if defined(DEBUG_SUBPAGE)
2644
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2645
           mmio, start, end, idx, eidx, memory);
2646
#endif
2647
    memory >>= IO_MEM_SHIFT;
2648
    for (; idx <= eidx; idx++) {
2649
        for (i = 0; i < 4; i++) {
2650
            if (io_mem_read[memory][i]) {
2651
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2652
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2653
            }
2654
            if (io_mem_write[memory][i]) {
2655
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2656
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2657
            }
2658
        }
2659
    }
2660

    
2661
    return 0;
2662
}
2663

    
2664
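/* allocate a subpage container for the target page at 'base'; *phys
   receives the new I/O index tagged with IO_MEM_SUBPAGE, and the whole
   page initially points to 'orig_memory' */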
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2665
                           ram_addr_t orig_memory)
2666
{
2667
    subpage_t *mmio;
2668
    int subpage_memory;
2669

    
2670
    mmio = qemu_mallocz(sizeof(subpage_t));
2671
    if (mmio != NULL) {
2672
        mmio->base = base;
2673
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2674
#if defined(DEBUG_SUBPAGE)
2675
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2676
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2677
#endif
2678
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2679
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2680
    }
2681

    
2682
    return mmio;
2683
}
2684

    
2685
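/* register the built-in I/O memory handlers (ROM, unassigned, notdirty,
   watchpoint) and allocate the physical RAM dirty bitmap */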
static void io_mem_init(void)
2686
{
2687
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2688
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2689
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2690
    io_mem_nb = 5;
2691

    
2692
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2693
                                          watch_mem_write, NULL);
2694
    /* alloc dirty bits array */
2695
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2696
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2697
}
2698

    
2699
/* mem_read and mem_write are arrays of functions containing the
2700
   function to access byte (index 0), word (index 1) and dword (index
2701
   2). Functions can be omitted with a NULL function pointer. The
2702
   registered functions may be modified dynamically later.
2703
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
2707
int cpu_register_io_memory(int io_index,
2708
                           CPUReadMemoryFunc **mem_read,
2709
                           CPUWriteMemoryFunc **mem_write,
2710
                           void *opaque)
2711
{
2712
    int i, subwidth = 0;
2713

    
2714
    if (io_index <= 0) {
2715
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2716
            return -1;
2717
        io_index = io_mem_nb++;
2718
    } else {
2719
        if (io_index >= IO_MEM_NB_ENTRIES)
2720
            return -1;
2721
    }
2722

    
2723
    for(i = 0;i < 3; i++) {
2724
        if (!mem_read[i] || !mem_write[i])
2725
            subwidth = IO_MEM_SUBWIDTH;
2726
        io_mem_read[io_index][i] = mem_read[i];
2727
        io_mem_write[io_index][i] = mem_write[i];
2728
    }
2729
    io_mem_opaque[io_index] = opaque;
2730
    return (io_index << IO_MEM_SHIFT) | subwidth;
2731
}
2732

    
2733
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2734
{
2735
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2736
}
2737

    
2738
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2739
{
2740
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2741
}
2742

    
2743
#endif /* !defined(CONFIG_USER_ONLY) */
2744

    
2745
/* physical memory access (slow version, mainly for debug) */
2746
#if defined(CONFIG_USER_ONLY)
2747
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2748
                            int len, int is_write)
2749
{
2750
    int l, flags;
2751
    target_ulong page;
2752
    void * p;
2753

    
2754
    while (len > 0) {
2755
        page = addr & TARGET_PAGE_MASK;
2756
        l = (page + TARGET_PAGE_SIZE) - addr;
2757
        if (l > len)
2758
            l = len;
2759
        flags = page_get_flags(page);
2760
        if (!(flags & PAGE_VALID))
2761
            return;
2762
        if (is_write) {
2763
            if (!(flags & PAGE_WRITE))
2764
                return;
2765
            /* XXX: this code should not depend on lock_user */
2766
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2767
                /* FIXME - should this return an error rather than just fail? */
2768
                return;
2769
            memcpy(p, buf, l);
2770
            unlock_user(p, addr, l);
2771
        } else {
2772
            if (!(flags & PAGE_READ))
2773
                return;
2774
            /* XXX: this code should not depend on lock_user */
2775
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2776
                /* FIXME - should this return an error rather than just fail? */
2777
                return;
2778
            memcpy(buf, p, l);
2779
            unlock_user(p, addr, 0);
2780
        }
2781
        len -= l;
2782
        buf += l;
2783
        addr += l;
2784
    }
2785
}
2786

    
2787
#else
2788
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2789
                            int len, int is_write)
2790
{
2791
    int l, io_index;
2792
    uint8_t *ptr;
2793
    uint32_t val;
2794
    target_phys_addr_t page;
2795
    unsigned long pd;
2796
    PhysPageDesc *p;
2797

    
2798
    while (len > 0) {
2799
        page = addr & TARGET_PAGE_MASK;
2800
        l = (page + TARGET_PAGE_SIZE) - addr;
2801
        if (l > len)
2802
            l = len;
2803
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2804
        if (!p) {
2805
            pd = IO_MEM_UNASSIGNED;
2806
        } else {
2807
            pd = p->phys_offset;
2808
        }
2809

    
2810
        if (is_write) {
2811
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2812
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2813
                /* XXX: could force cpu_single_env to NULL to avoid
2814
                   potential bugs */
2815
                if (l >= 4 && ((addr & 3) == 0)) {
2816
                    /* 32 bit write access */
2817
                    val = ldl_p(buf);
2818
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2819
                    l = 4;
2820
                } else if (l >= 2 && ((addr & 1) == 0)) {
2821
                    /* 16 bit write access */
2822
                    val = lduw_p(buf);
2823
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2824
                    l = 2;
2825
                } else {
2826
                    /* 8 bit write access */
2827
                    val = ldub_p(buf);
2828
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2829
                    l = 1;
2830
                }
2831
            } else {
2832
                unsigned long addr1;
2833
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2834
                /* RAM case */
2835
                ptr = phys_ram_base + addr1;
2836
                memcpy(ptr, buf, l);
2837
                if (!cpu_physical_memory_is_dirty(addr1)) {
2838
                    /* invalidate code */
2839
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2840
                    /* set dirty bit */
2841
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2842
                        (0xff & ~CODE_DIRTY_FLAG);
2843
                }
2844
            }
2845
        } else {
2846
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2847
                !(pd & IO_MEM_ROMD)) {
2848
                /* I/O case */
2849
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2850
                if (l >= 4 && ((addr & 3) == 0)) {
2851
                    /* 32 bit read access */
2852
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2853
                    stl_p(buf, val);
2854
                    l = 4;
2855
                } else if (l >= 2 && ((addr & 1) == 0)) {
2856
                    /* 16 bit read access */
2857
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2858
                    stw_p(buf, val);
2859
                    l = 2;
2860
                } else {
2861
                    /* 8 bit read access */
2862
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2863
                    stb_p(buf, val);
2864
                    l = 1;
2865
                }
2866
            } else {
2867
                /* RAM case */
2868
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2869
                    (addr & ~TARGET_PAGE_MASK);
2870
                memcpy(buf, ptr, l);
2871
            }
2872
        }
2873
        len -= l;
2874
        buf += l;
2875
        addr += l;
2876
    }
2877
}
2878

    
2879
/* used for ROM loading: can write in RAM and ROM */
2880
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2881
                                   const uint8_t *buf, int len)
2882
{
2883
    int l;
2884
    uint8_t *ptr;
2885
    target_phys_addr_t page;
2886
    unsigned long pd;
2887
    PhysPageDesc *p;
2888

    
2889
    while (len > 0) {
2890
        page = addr & TARGET_PAGE_MASK;
2891
        l = (page + TARGET_PAGE_SIZE) - addr;
2892
        if (l > len)
2893
            l = len;
2894
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2895
        if (!p) {
2896
            pd = IO_MEM_UNASSIGNED;
2897
        } else {
2898
            pd = p->phys_offset;
2899
        }
2900

    
2901
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2902
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2903
            !(pd & IO_MEM_ROMD)) {
2904
            /* do nothing */
2905
        } else {
2906
            unsigned long addr1;
2907
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2908
            /* ROM/RAM case */
2909
            ptr = phys_ram_base + addr1;
2910
            memcpy(ptr, buf, l);
2911
        }
2912
        len -= l;
2913
        buf += l;
2914
        addr += l;
2915
    }
2916
}
2917

    
2918

    
2919
/* warning: addr must be aligned */
2920
uint32_t ldl_phys(target_phys_addr_t addr)
2921
{
2922
    int io_index;
2923
    uint8_t *ptr;
2924
    uint32_t val;
2925
    unsigned long pd;
2926
    PhysPageDesc *p;
2927

    
2928
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2929
    if (!p) {
2930
        pd = IO_MEM_UNASSIGNED;
2931
    } else {
2932
        pd = p->phys_offset;
2933
    }
2934

    
2935
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2936
        !(pd & IO_MEM_ROMD)) {
2937
        /* I/O case */
2938
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2939
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2940
    } else {
2941
        /* RAM case */
2942
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2943
            (addr & ~TARGET_PAGE_MASK);
2944
        val = ldl_p(ptr);
2945
    }
2946
    return val;
2947
}
2948

    
2949
/* warning: addr must be aligned */
2950
uint64_t ldq_phys(target_phys_addr_t addr)
2951
{
2952
    int io_index;
2953
    uint8_t *ptr;
2954
    uint64_t val;
2955
    unsigned long pd;
2956
    PhysPageDesc *p;
2957

    
2958
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2959
    if (!p) {
2960
        pd = IO_MEM_UNASSIGNED;
2961
    } else {
2962
        pd = p->phys_offset;
2963
    }
2964

    
2965
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2966
        !(pd & IO_MEM_ROMD)) {
2967
        /* I/O case */
2968
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2969
#ifdef TARGET_WORDS_BIGENDIAN
2970
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2971
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2972
#else
2973
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2974
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2975
#endif
2976
    } else {
2977
        /* RAM case */
2978
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2979
            (addr & ~TARGET_PAGE_MASK);
2980
        val = ldq_p(ptr);
2981
    }
2982
    return val;
2983
}
2984

    
2985
/* XXX: optimize */
2986
uint32_t ldub_phys(target_phys_addr_t addr)
2987
{
2988
    uint8_t val;
2989
    cpu_physical_memory_read(addr, &val, 1);
2990
    return val;
2991
}
2992

    
2993
/* XXX: optimize */
2994
uint32_t lduw_phys(target_phys_addr_t addr)
2995
{
2996
    uint16_t val;
2997
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2998
    return tswap16(val);
2999
}
3000

    
3001
/* warning: addr must be aligned. The ram page is not marked as dirty
3002
   and the code inside is not invalidated. It is useful if the dirty
3003
   bits are used to track modified PTEs */
3004
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3005
{
3006
    int io_index;
3007
    uint8_t *ptr;
3008
    unsigned long pd;
3009
    PhysPageDesc *p;
3010

    
3011
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3012
    if (!p) {
3013
        pd = IO_MEM_UNASSIGNED;
3014
    } else {
3015
        pd = p->phys_offset;
3016
    }
3017

    
3018
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3019
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3020
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3021
    } else {
3022
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3023
        ptr = phys_ram_base + addr1;
3024
        stl_p(ptr, val);
3025

    
3026
        if (unlikely(in_migration)) {
3027
            if (!cpu_physical_memory_is_dirty(addr1)) {
3028
                /* invalidate code */
3029
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3030
                /* set dirty bit */
3031
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3032
                    (0xff & ~CODE_DIRTY_FLAG);
3033
            }
3034
        }
3035
    }
3036
}
3037

    
3038
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3039
{
3040
    int io_index;
3041
    uint8_t *ptr;
3042
    unsigned long pd;
3043
    PhysPageDesc *p;
3044

    
3045
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3046
    if (!p) {
3047
        pd = IO_MEM_UNASSIGNED;
3048
    } else {
3049
        pd = p->phys_offset;
3050
    }
3051

    
3052
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3053
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3054
#ifdef TARGET_WORDS_BIGENDIAN
3055
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3056
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3057
#else
3058
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3059
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3060
#endif
3061
    } else {
3062
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3063
            (addr & ~TARGET_PAGE_MASK);
3064
        stq_p(ptr, val);
3065
    }
3066
}
3067

    
3068
/* warning: addr must be aligned */
3069
void stl_phys(target_phys_addr_t addr, uint32_t val)
3070
{
3071
    int io_index;
3072
    uint8_t *ptr;
3073
    unsigned long pd;
3074
    PhysPageDesc *p;
3075

    
3076
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3077
    if (!p) {
3078
        pd = IO_MEM_UNASSIGNED;
3079
    } else {
3080
        pd = p->phys_offset;
3081
    }
3082

    
3083
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3084
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3085
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3086
    } else {
3087
        unsigned long addr1;
3088
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3089
        /* RAM case */
3090
        ptr = phys_ram_base + addr1;
3091
        stl_p(ptr, val);
3092
        if (!cpu_physical_memory_is_dirty(addr1)) {
3093
            /* invalidate code */
3094
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3095
            /* set dirty bit */
3096
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3097
                (0xff & ~CODE_DIRTY_FLAG);
3098
        }
3099
    }
3100
}
3101

    
3102
/* XXX: optimize */
3103
void stb_phys(target_phys_addr_t addr, uint32_t val)
3104
{
3105
    uint8_t v = val;
3106
    cpu_physical_memory_write(addr, &v, 1);
3107
}
3108

    
3109
/* XXX: optimize */
3110
void stw_phys(target_phys_addr_t addr, uint32_t val)
3111
{
3112
    uint16_t v = tswap16(val);
3113
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3114
}
3115

    
3116
/* XXX: optimize */
3117
void stq_phys(target_phys_addr_t addr, uint64_t val)
3118
{
3119
    val = tswap64(val);
3120
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3121
}
3122

    
3123
#endif
3124

    
3125
/* virtual memory access for debug */
3126
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3127
                        uint8_t *buf, int len, int is_write)
3128
{
3129
    int l;
3130
    target_phys_addr_t phys_addr;
3131
    target_ulong page;
3132

    
3133
    while (len > 0) {
3134
        page = addr & TARGET_PAGE_MASK;
3135
        phys_addr = cpu_get_phys_page_debug(env, page);
3136
        /* if no physical page mapped, return an error */
3137
        if (phys_addr == -1)
3138
            return -1;
3139
        l = (page + TARGET_PAGE_SIZE) - addr;
3140
        if (l > len)
3141
            l = len;
3142
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3143
                               buf, l, is_write);
3144
        len -= l;
3145
        buf += l;
3146
        addr += l;
3147
    }
3148
    return 0;
3149
}

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
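
/* Worked example for cpu_io_recompile() above (illustrative, not part of
   the original source): if a five-instruction TB performs device I/O in its
   third instruction under icount, roughly two instructions have completed
   before the fault, so n ends up as 2 + 1 = 3 and the block is regenerated
   with cflags = 3 | CF_LAST_IO, i.e. a TB that re-executes exactly those
   three instructions with the I/O access as its final one. */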

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
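
/* Note (added for clarity, not part of the original source): dump_exec_info()
   backs the monitor's "info jit" command and reports translation cache
   occupancy, average TB sizes, and chained-jump statistics; the percentages
   above are computed relative to nb_tbs. */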

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
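
/* Explanatory note (added, not part of the original source):
   softmmu_template.h is included once per access width below, with SHIFT
   selecting an access of 1 << SHIFT bytes (SHIFT 0..3 -> 8/16/32/64 bit).
   With MMUSUFFIX set to _cmmu and SOFTMMU_CODE_ACCESS defined, each
   inclusion generates the code-fetch (_cmmu) load helpers used when
   fetching guest instructions for translation, and the env macro above
   makes them operate on cpu_single_env. */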

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif