Statistics
| Branch: | Revision:

root / exec.c @ faed1c2a

History | View | Annotate | Download (109.3 kB)

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36
#include "qemu-common.h"
37
#include "tcg.h"
38
#include "hw/hw.h"
39
#include "osdep.h"
40
#include "kvm.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#endif
44

    
45
//#define DEBUG_TB_INVALIDATE
46
//#define DEBUG_FLUSH
47
//#define DEBUG_TLB
48
//#define DEBUG_UNASSIGNED
49

    
50
/* make various TB consistency checks */
51
//#define DEBUG_TB_CHECK
52
//#define DEBUG_TLB_CHECK
53

    
54
//#define DEBUG_IOPORT
55
//#define DEBUG_SUBPAGE
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* TB consistency checks only implemented for usermode emulation.  */
59
#undef DEBUG_TB_CHECK
60
#endif
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#if defined(TARGET_SPARC64)
65
#define TARGET_PHYS_ADDR_SPACE_BITS 41
66
#elif defined(TARGET_SPARC)
67
#define TARGET_PHYS_ADDR_SPACE_BITS 36
68
#elif defined(TARGET_ALPHA)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 42
70
#define TARGET_VIRT_ADDR_SPACE_BITS 42
71
#elif defined(TARGET_PPC64)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#elif defined(TARGET_X86_64)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_I386)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 36
77
#else
78
#define TARGET_PHYS_ADDR_SPACE_BITS 32
79
#endif
80

    
81
static TranslationBlock *tbs;
82
int code_gen_max_blocks;
83
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84
static int nb_tbs;
85
/* any access to the tbs or the page table must use this lock */
86
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87

    
88
#if defined(__arm__) || defined(__sparc_v9__)
89
/* The prologue must be reachable with a direct jump. ARM and Sparc64
90
 have limited branch ranges (possibly also PPC) so place it in a
91
 section close to code segment. */
92
#define code_gen_section                                \
93
    __attribute__((__section__(".gen_code")))           \
94
    __attribute__((aligned (32)))
95
#elif defined(_WIN32)
96
/* Maximum alignment for Win32 is 16. */
97
#define code_gen_section                                \
98
    __attribute__((aligned (16)))
99
#else
100
#define code_gen_section                                \
101
    __attribute__((aligned (32)))
102
#endif
103

    
104
uint8_t code_gen_prologue[1024] code_gen_section;
105
static uint8_t *code_gen_buffer;
106
static unsigned long code_gen_buffer_size;
107
/* threshold to flush the translated code buffer */
108
static unsigned long code_gen_buffer_max_size;
109
uint8_t *code_gen_ptr;
110

    
111
#if !defined(CONFIG_USER_ONLY)
112
int phys_ram_fd;
113
uint8_t *phys_ram_dirty;
114
static int in_migration;
115

    
116
typedef struct RAMBlock {
117
    uint8_t *host;
118
    ram_addr_t offset;
119
    ram_addr_t length;
120
    struct RAMBlock *next;
121
} RAMBlock;
122

    
123
static RAMBlock *ram_blocks;
124
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125
   then we can no longer assume contiguous ram offsets, and external uses
126
   of this variable will break.  */
127
ram_addr_t last_ram_offset;
128
#endif
129

    
130
CPUState *first_cpu;
131
/* current CPU in the current thread. It is only valid inside
132
   cpu_exec() */
133
CPUState *cpu_single_env;
134
/* 0 = Do not count executed instructions.
135
   1 = Precise instruction counting.
136
   2 = Adaptive rate instruction counting.  */
137
int use_icount = 0;
138
/* Current instruction counter.  While executing translated code this may
139
   include some instructions that have not yet been executed.  */
140
int64_t qemu_icount;
141

    
142
typedef struct PageDesc {
143
    /* list of TBs intersecting this ram page */
144
    TranslationBlock *first_tb;
145
    /* in order to optimize self modifying code, we count the number
146
       of lookups we do to a given page to use a bitmap */
147
    unsigned int code_write_count;
148
    uint8_t *code_bitmap;
149
#if defined(CONFIG_USER_ONLY)
150
    unsigned long flags;
151
#endif
152
} PageDesc;
153

    
154
typedef struct PhysPageDesc {
155
    /* offset in host memory of the page + io_index in the low bits */
156
    ram_addr_t phys_offset;
157
    ram_addr_t region_offset;
158
} PhysPageDesc;
159

    
160
#define L2_BITS 10
161
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162
/* XXX: this is a temporary hack for alpha target.
163
 *      In the future, this is to be replaced by a multi-level table
164
 *      to actually be able to handle the complete 64 bits address space.
165
 */
166
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167
#else
168
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169
#endif
170

    
171
#define L1_SIZE (1 << L1_BITS)
172
#define L2_SIZE (1 << L2_BITS)
173

    
174
unsigned long qemu_real_host_page_size;
175
unsigned long qemu_host_page_bits;
176
unsigned long qemu_host_page_size;
177
unsigned long qemu_host_page_mask;
178

    
179
/* XXX: for system emulation, it could just be an array */
180
static PageDesc *l1_map[L1_SIZE];
181
static PhysPageDesc **l1_phys_map;
182

    
183
#if !defined(CONFIG_USER_ONLY)
184
static void io_mem_init(void);
185

    
186
/* io memory support */
187
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
188
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190
static char io_mem_used[IO_MEM_NB_ENTRIES];
191
static int io_mem_watch;
192
#endif
193

    
194
/* log support */
195
static const char *logfilename = "/tmp/qemu.log";
196
FILE *logfile;
197
int loglevel;
198
static int log_append = 0;
199

    
200
/* statistics */
201
static int tlb_flush_count;
202
static int tb_flush_count;
203
static int tb_phys_invalidate_count;
204

    
205
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
206
typedef struct subpage_t {
207
    target_phys_addr_t base;
208
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
209
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
210
    void *opaque[TARGET_PAGE_SIZE][2][4];
211
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
212
} subpage_t;
213

    
214
#ifdef _WIN32
215
static void map_exec(void *addr, long size)
216
{
217
    DWORD old_protect;
218
    VirtualProtect(addr, size,
219
                   PAGE_EXECUTE_READWRITE, &old_protect);
220
    
221
}
222
#else
223
static void map_exec(void *addr, long size)
224
{
225
    unsigned long start, end, page_size;
226
    
227
    page_size = getpagesize();
228
    start = (unsigned long)addr;
229
    start &= ~(page_size - 1);
230
    
231
    end = (unsigned long)addr + size;
232
    end += page_size - 1;
233
    end &= ~(page_size - 1);
234
    
235
    mprotect((void *)start, end - start,
236
             PROT_READ | PROT_WRITE | PROT_EXEC);
237
}
238
#endif
239

    
240
static void page_init(void)
241
{
242
    /* NOTE: we can always suppose that qemu_host_page_size >=
243
       TARGET_PAGE_SIZE */
244
#ifdef _WIN32
245
    {
246
        SYSTEM_INFO system_info;
247

    
248
        GetSystemInfo(&system_info);
249
        qemu_real_host_page_size = system_info.dwPageSize;
250
    }
251
#else
252
    qemu_real_host_page_size = getpagesize();
253
#endif
254
    if (qemu_host_page_size == 0)
255
        qemu_host_page_size = qemu_real_host_page_size;
256
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
257
        qemu_host_page_size = TARGET_PAGE_SIZE;
258
    qemu_host_page_bits = 0;
259
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
260
        qemu_host_page_bits++;
261
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
262
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
263
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
264

    
265
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
266
    {
267
        long long startaddr, endaddr;
268
        FILE *f;
269
        int n;
270

    
271
        mmap_lock();
272
        last_brk = (unsigned long)sbrk(0);
273
        f = fopen("/proc/self/maps", "r");
274
        if (f) {
275
            do {
276
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
277
                if (n == 2) {
278
                    startaddr = MIN(startaddr,
279
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280
                    endaddr = MIN(endaddr,
281
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
283
                                   TARGET_PAGE_ALIGN(endaddr),
284
                                   PAGE_RESERVED); 
285
                }
286
            } while (!feof(f));
287
            fclose(f);
288
        }
289
        mmap_unlock();
290
    }
291
#endif
292
}
293

    
294
static inline PageDesc **page_l1_map(target_ulong index)
295
{
296
#if TARGET_LONG_BITS > 32
297
    /* Host memory outside guest VM.  For 32-bit targets we have already
298
       excluded high addresses.  */
299
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
300
        return NULL;
301
#endif
302
    return &l1_map[index >> L2_BITS];
303
}
304

    
305
static inline PageDesc *page_find_alloc(target_ulong index)
306
{
307
    PageDesc **lp, *p;
308
    lp = page_l1_map(index);
309
    if (!lp)
310
        return NULL;
311

    
312
    p = *lp;
313
    if (!p) {
314
        /* allocate if not found */
315
#if defined(CONFIG_USER_ONLY)
316
        size_t len = sizeof(PageDesc) * L2_SIZE;
317
        /* Don't use qemu_malloc because it may recurse.  */
318
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
319
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
320
        *lp = p;
321
        if (h2g_valid(p)) {
322
            unsigned long addr = h2g(p);
323
            page_set_flags(addr & TARGET_PAGE_MASK,
324
                           TARGET_PAGE_ALIGN(addr + len),
325
                           PAGE_RESERVED); 
326
        }
327
#else
328
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
329
        *lp = p;
330
#endif
331
    }
332
    return p + (index & (L2_SIZE - 1));
333
}
334

    
335
static inline PageDesc *page_find(target_ulong index)
336
{
337
    PageDesc **lp, *p;
338
    lp = page_l1_map(index);
339
    if (!lp)
340
        return NULL;
341

    
342
    p = *lp;
343
    if (!p) {
344
        return NULL;
345
    }
346
    return p + (index & (L2_SIZE - 1));
347
}
348

    
349
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
350
{
351
    void **lp, **p;
352
    PhysPageDesc *pd;
353

    
354
    p = (void **)l1_phys_map;
355
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
356

    
357
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
358
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359
#endif
360
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
361
    p = *lp;
362
    if (!p) {
363
        /* allocate if not found */
364
        if (!alloc)
365
            return NULL;
366
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
367
        memset(p, 0, sizeof(void *) * L1_SIZE);
368
        *lp = p;
369
    }
370
#endif
371
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372
    pd = *lp;
373
    if (!pd) {
374
        int i;
375
        /* allocate if not found */
376
        if (!alloc)
377
            return NULL;
378
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
379
        *lp = pd;
380
        for (i = 0; i < L2_SIZE; i++) {
381
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
382
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
383
        }
384
    }
385
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
386
}
387

    
388
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
389
{
390
    return phys_page_find_alloc(index, 0);
391
}
392

    
393
#if !defined(CONFIG_USER_ONLY)
394
static void tlb_protect_code(ram_addr_t ram_addr);
395
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
396
                                    target_ulong vaddr);
397
#define mmap_lock() do { } while(0)
398
#define mmap_unlock() do { } while(0)
399
#endif
400

    
401
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
402

    
403
#if defined(CONFIG_USER_ONLY)
404
/* Currently it is not recommended to allocate big chunks of data in
405
   user mode. It will change when a dedicated libc will be used */
406
#define USE_STATIC_CODE_GEN_BUFFER
407
#endif
408

    
409
#ifdef USE_STATIC_CODE_GEN_BUFFER
410
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
411
#endif
412

    
413
static void code_gen_alloc(unsigned long tb_size)
414
{
415
#ifdef USE_STATIC_CODE_GEN_BUFFER
416
    code_gen_buffer = static_code_gen_buffer;
417
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
418
    map_exec(code_gen_buffer, code_gen_buffer_size);
419
#else
420
    code_gen_buffer_size = tb_size;
421
    if (code_gen_buffer_size == 0) {
422
#if defined(CONFIG_USER_ONLY)
423
        /* in user mode, phys_ram_size is not meaningful */
424
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
425
#else
426
        /* XXX: needs adjustments */
427
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
428
#endif
429
    }
430
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
431
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
432
    /* The code gen buffer location may have constraints depending on
433
       the host cpu and OS */
434
#if defined(__linux__) 
435
    {
436
        int flags;
437
        void *start = NULL;
438

    
439
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
440
#if defined(__x86_64__)
441
        flags |= MAP_32BIT;
442
        /* Cannot map more than that */
443
        if (code_gen_buffer_size > (800 * 1024 * 1024))
444
            code_gen_buffer_size = (800 * 1024 * 1024);
445
#elif defined(__sparc_v9__)
446
        // Map the buffer below 2G, so we can use direct calls and branches
447
        flags |= MAP_FIXED;
448
        start = (void *) 0x60000000UL;
449
        if (code_gen_buffer_size > (512 * 1024 * 1024))
450
            code_gen_buffer_size = (512 * 1024 * 1024);
451
#elif defined(__arm__)
452
        /* Map the buffer below 32M, so we can use direct calls and branches */
453
        flags |= MAP_FIXED;
454
        start = (void *) 0x01000000UL;
455
        if (code_gen_buffer_size > 16 * 1024 * 1024)
456
            code_gen_buffer_size = 16 * 1024 * 1024;
457
#endif
458
        code_gen_buffer = mmap(start, code_gen_buffer_size,
459
                               PROT_WRITE | PROT_READ | PROT_EXEC,
460
                               flags, -1, 0);
461
        if (code_gen_buffer == MAP_FAILED) {
462
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463
            exit(1);
464
        }
465
    }
466
#elif defined(__FreeBSD__) || defined(__DragonFly__)
467
    {
468
        int flags;
469
        void *addr = NULL;
470
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
471
#if defined(__x86_64__)
472
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
473
         * 0x40000000 is free */
474
        flags |= MAP_FIXED;
475
        addr = (void *)0x40000000;
476
        /* Cannot map more than that */
477
        if (code_gen_buffer_size > (800 * 1024 * 1024))
478
            code_gen_buffer_size = (800 * 1024 * 1024);
479
#endif
480
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
481
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
482
                               flags, -1, 0);
483
        if (code_gen_buffer == MAP_FAILED) {
484
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485
            exit(1);
486
        }
487
    }
488
#else
489
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
490
    map_exec(code_gen_buffer, code_gen_buffer_size);
491
#endif
492
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
493
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
494
    code_gen_buffer_max_size = code_gen_buffer_size - 
495
        code_gen_max_block_size();
496
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
497
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
498
}
499

    
500
/* Must be called before using the QEMU cpus. 'tb_size' is the size
501
   (in bytes) allocated to the translation buffer. Zero means default
502
   size. */
503
void cpu_exec_init_all(unsigned long tb_size)
504
{
505
    cpu_gen_init();
506
    code_gen_alloc(tb_size);
507
    code_gen_ptr = code_gen_buffer;
508
    page_init();
509
#if !defined(CONFIG_USER_ONLY)
510
    io_mem_init();
511
#endif
512
}
513

    
514
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
515

    
516
#define CPU_COMMON_SAVE_VERSION 1
517

    
518
static void cpu_common_save(QEMUFile *f, void *opaque)
519
{
520
    CPUState *env = opaque;
521

    
522
    cpu_synchronize_state(env);
523

    
524
    qemu_put_be32s(f, &env->halted);
525
    qemu_put_be32s(f, &env->interrupt_request);
526
}
527

    
528
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
529
{
530
    CPUState *env = opaque;
531

    
532
    cpu_synchronize_state(env);
533
    if (version_id != CPU_COMMON_SAVE_VERSION)
534
        return -EINVAL;
535

    
536
    qemu_get_be32s(f, &env->halted);
537
    qemu_get_be32s(f, &env->interrupt_request);
538
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
539
       version_id is increased. */
540
    env->interrupt_request &= ~0x01;
541
    tlb_flush(env, 1);
542

    
543
    return 0;
544
}
545
#endif
546

    
547
CPUState *qemu_get_cpu(int cpu)
548
{
549
    CPUState *env = first_cpu;
550

    
551
    while (env) {
552
        if (env->cpu_index == cpu)
553
            break;
554
        env = env->next_cpu;
555
    }
556

    
557
    return env;
558
}
559

    
560
void cpu_exec_init(CPUState *env)
561
{
562
    CPUState **penv;
563
    int cpu_index;
564

    
565
#if defined(CONFIG_USER_ONLY)
566
    cpu_list_lock();
567
#endif
568
    env->next_cpu = NULL;
569
    penv = &first_cpu;
570
    cpu_index = 0;
571
    while (*penv != NULL) {
572
        penv = &(*penv)->next_cpu;
573
        cpu_index++;
574
    }
575
    env->cpu_index = cpu_index;
576
    env->numa_node = 0;
577
    TAILQ_INIT(&env->breakpoints);
578
    TAILQ_INIT(&env->watchpoints);
579
    *penv = env;
580
#if defined(CONFIG_USER_ONLY)
581
    cpu_list_unlock();
582
#endif
583
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
584
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
585
                    cpu_common_save, cpu_common_load, env);
586
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
587
                    cpu_save, cpu_load, env);
588
#endif
589
}
590

    
591
static inline void invalidate_page_bitmap(PageDesc *p)
592
{
593
    if (p->code_bitmap) {
594
        qemu_free(p->code_bitmap);
595
        p->code_bitmap = NULL;
596
    }
597
    p->code_write_count = 0;
598
}
599

    
600
/* set to NULL all the 'first_tb' fields in all PageDescs */
601
static void page_flush_tb(void)
602
{
603
    int i, j;
604
    PageDesc *p;
605

    
606
    for(i = 0; i < L1_SIZE; i++) {
607
        p = l1_map[i];
608
        if (p) {
609
            for(j = 0; j < L2_SIZE; j++) {
610
                p->first_tb = NULL;
611
                invalidate_page_bitmap(p);
612
                p++;
613
            }
614
        }
615
    }
616
}
617

    
618
/* flush all the translation blocks */
619
/* XXX: tb_flush is currently not thread safe */
620
void tb_flush(CPUState *env1)
621
{
622
    CPUState *env;
623
#if defined(DEBUG_FLUSH)
624
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
625
           (unsigned long)(code_gen_ptr - code_gen_buffer),
626
           nb_tbs, nb_tbs > 0 ?
627
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
628
#endif
629
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
630
        cpu_abort(env1, "Internal error: code buffer overflow\n");
631

    
632
    nb_tbs = 0;
633

    
634
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
635
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
636
    }
637

    
638
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
639
    page_flush_tb();
640

    
641
    code_gen_ptr = code_gen_buffer;
642
    /* XXX: flush processor icache at this point if cache flush is
643
       expensive */
644
    tb_flush_count++;
645
}
646

    
647
#ifdef DEBUG_TB_CHECK
648

    
649
static void tb_invalidate_check(target_ulong address)
650
{
651
    TranslationBlock *tb;
652
    int i;
653
    address &= TARGET_PAGE_MASK;
654
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
655
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
656
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
657
                  address >= tb->pc + tb->size)) {
658
                printf("ERROR invalidate: address=" TARGET_FMT_lx
659
                       " PC=%08lx size=%04x\n",
660
                       address, (long)tb->pc, tb->size);
661
            }
662
        }
663
    }
664
}
665

    
666
/* verify that all the pages have correct rights for code */
667
static void tb_page_check(void)
668
{
669
    TranslationBlock *tb;
670
    int i, flags1, flags2;
671

    
672
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
673
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
674
            flags1 = page_get_flags(tb->pc);
675
            flags2 = page_get_flags(tb->pc + tb->size - 1);
676
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
677
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
678
                       (long)tb->pc, tb->size, flags1, flags2);
679
            }
680
        }
681
    }
682
}
683

    
684
#endif
685

    
686
/* invalidate one TB */
687
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
688
                             int next_offset)
689
{
690
    TranslationBlock *tb1;
691
    for(;;) {
692
        tb1 = *ptb;
693
        if (tb1 == tb) {
694
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
695
            break;
696
        }
697
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
698
    }
699
}
700

    
701
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
702
{
703
    TranslationBlock *tb1;
704
    unsigned int n1;
705

    
706
    for(;;) {
707
        tb1 = *ptb;
708
        n1 = (long)tb1 & 3;
709
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
710
        if (tb1 == tb) {
711
            *ptb = tb1->page_next[n1];
712
            break;
713
        }
714
        ptb = &tb1->page_next[n1];
715
    }
716
}
717

    
718
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
719
{
720
    TranslationBlock *tb1, **ptb;
721
    unsigned int n1;
722

    
723
    ptb = &tb->jmp_next[n];
724
    tb1 = *ptb;
725
    if (tb1) {
726
        /* find tb(n) in circular list */
727
        for(;;) {
728
            tb1 = *ptb;
729
            n1 = (long)tb1 & 3;
730
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
731
            if (n1 == n && tb1 == tb)
732
                break;
733
            if (n1 == 2) {
734
                ptb = &tb1->jmp_first;
735
            } else {
736
                ptb = &tb1->jmp_next[n1];
737
            }
738
        }
739
        /* now we can suppress tb(n) from the list */
740
        *ptb = tb->jmp_next[n];
741

    
742
        tb->jmp_next[n] = NULL;
743
    }
744
}
745

    
746
/* reset the jump entry 'n' of a TB so that it is not chained to
747
   another TB */
748
static inline void tb_reset_jump(TranslationBlock *tb, int n)
749
{
750
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
751
}
752

    
753
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
754
{
755
    CPUState *env;
756
    PageDesc *p;
757
    unsigned int h, n1;
758
    target_phys_addr_t phys_pc;
759
    TranslationBlock *tb1, *tb2;
760

    
761
    /* remove the TB from the hash list */
762
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
763
    h = tb_phys_hash_func(phys_pc);
764
    tb_remove(&tb_phys_hash[h], tb,
765
              offsetof(TranslationBlock, phys_hash_next));
766

    
767
    /* remove the TB from the page list */
768
    if (tb->page_addr[0] != page_addr) {
769
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
770
        tb_page_remove(&p->first_tb, tb);
771
        invalidate_page_bitmap(p);
772
    }
773
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
774
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
775
        tb_page_remove(&p->first_tb, tb);
776
        invalidate_page_bitmap(p);
777
    }
778

    
779
    tb_invalidated_flag = 1;
780

    
781
    /* remove the TB from the hash list */
782
    h = tb_jmp_cache_hash_func(tb->pc);
783
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
784
        if (env->tb_jmp_cache[h] == tb)
785
            env->tb_jmp_cache[h] = NULL;
786
    }
787

    
788
    /* suppress this TB from the two jump lists */
789
    tb_jmp_remove(tb, 0);
790
    tb_jmp_remove(tb, 1);
791

    
792
    /* suppress any remaining jumps to this TB */
793
    tb1 = tb->jmp_first;
794
    for(;;) {
795
        n1 = (long)tb1 & 3;
796
        if (n1 == 2)
797
            break;
798
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
799
        tb2 = tb1->jmp_next[n1];
800
        tb_reset_jump(tb1, n1);
801
        tb1->jmp_next[n1] = NULL;
802
        tb1 = tb2;
803
    }
804
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
805

    
806
    tb_phys_invalidate_count++;
807
}
808

    
809
static inline void set_bits(uint8_t *tab, int start, int len)
810
{
811
    int end, mask, end1;
812

    
813
    end = start + len;
814
    tab += start >> 3;
815
    mask = 0xff << (start & 7);
816
    if ((start & ~7) == (end & ~7)) {
817
        if (start < end) {
818
            mask &= ~(0xff << (end & 7));
819
            *tab |= mask;
820
        }
821
    } else {
822
        *tab++ |= mask;
823
        start = (start + 8) & ~7;
824
        end1 = end & ~7;
825
        while (start < end1) {
826
            *tab++ = 0xff;
827
            start += 8;
828
        }
829
        if (start < end) {
830
            mask = ~(0xff << (end & 7));
831
            *tab |= mask;
832
        }
833
    }
834
}
835

    
836
static void build_page_bitmap(PageDesc *p)
837
{
838
    int n, tb_start, tb_end;
839
    TranslationBlock *tb;
840

    
841
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
842

    
843
    tb = p->first_tb;
844
    while (tb != NULL) {
845
        n = (long)tb & 3;
846
        tb = (TranslationBlock *)((long)tb & ~3);
847
        /* NOTE: this is subtle as a TB may span two physical pages */
848
        if (n == 0) {
849
            /* NOTE: tb_end may be after the end of the page, but
850
               it is not a problem */
851
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
852
            tb_end = tb_start + tb->size;
853
            if (tb_end > TARGET_PAGE_SIZE)
854
                tb_end = TARGET_PAGE_SIZE;
855
        } else {
856
            tb_start = 0;
857
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
858
        }
859
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
860
        tb = tb->page_next[n];
861
    }
862
}
863

    
864
TranslationBlock *tb_gen_code(CPUState *env,
865
                              target_ulong pc, target_ulong cs_base,
866
                              int flags, int cflags)
867
{
868
    TranslationBlock *tb;
869
    uint8_t *tc_ptr;
870
    target_ulong phys_pc, phys_page2, virt_page2;
871
    int code_gen_size;
872

    
873
    phys_pc = get_phys_addr_code(env, pc);
874
    tb = tb_alloc(pc);
875
    if (!tb) {
876
        /* flush must be done */
877
        tb_flush(env);
878
        /* cannot fail at this point */
879
        tb = tb_alloc(pc);
880
        /* Don't forget to invalidate previous TB info.  */
881
        tb_invalidated_flag = 1;
882
    }
883
    tc_ptr = code_gen_ptr;
884
    tb->tc_ptr = tc_ptr;
885
    tb->cs_base = cs_base;
886
    tb->flags = flags;
887
    tb->cflags = cflags;
888
    cpu_gen_code(env, tb, &code_gen_size);
889
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
890

    
891
    /* check next page if needed */
892
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
893
    phys_page2 = -1;
894
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
895
        phys_page2 = get_phys_addr_code(env, virt_page2);
896
    }
897
    tb_link_phys(tb, phys_pc, phys_page2);
898
    return tb;
899
}
900

    
901
/* invalidate all TBs which intersect with the target physical page
902
   starting in range [start;end[. NOTE: start and end must refer to
903
   the same physical page. 'is_cpu_write_access' should be true if called
904
   from a real cpu write access: the virtual CPU will exit the current
905
   TB if code is modified inside this TB. */
906
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
907
                                   int is_cpu_write_access)
908
{
909
    TranslationBlock *tb, *tb_next, *saved_tb;
910
    CPUState *env = cpu_single_env;
911
    target_ulong tb_start, tb_end;
912
    PageDesc *p;
913
    int n;
914
#ifdef TARGET_HAS_PRECISE_SMC
915
    int current_tb_not_found = is_cpu_write_access;
916
    TranslationBlock *current_tb = NULL;
917
    int current_tb_modified = 0;
918
    target_ulong current_pc = 0;
919
    target_ulong current_cs_base = 0;
920
    int current_flags = 0;
921
#endif /* TARGET_HAS_PRECISE_SMC */
922

    
923
    p = page_find(start >> TARGET_PAGE_BITS);
924
    if (!p)
925
        return;
926
    if (!p->code_bitmap &&
927
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
928
        is_cpu_write_access) {
929
        /* build code bitmap */
930
        build_page_bitmap(p);
931
    }
932

    
933
    /* we remove all the TBs in the range [start, end[ */
934
    /* XXX: see if in some cases it could be faster to invalidate all the code */
935
    tb = p->first_tb;
936
    while (tb != NULL) {
937
        n = (long)tb & 3;
938
        tb = (TranslationBlock *)((long)tb & ~3);
939
        tb_next = tb->page_next[n];
940
        /* NOTE: this is subtle as a TB may span two physical pages */
941
        if (n == 0) {
942
            /* NOTE: tb_end may be after the end of the page, but
943
               it is not a problem */
944
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
945
            tb_end = tb_start + tb->size;
946
        } else {
947
            tb_start = tb->page_addr[1];
948
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
949
        }
950
        if (!(tb_end <= start || tb_start >= end)) {
951
#ifdef TARGET_HAS_PRECISE_SMC
952
            if (current_tb_not_found) {
953
                current_tb_not_found = 0;
954
                current_tb = NULL;
955
                if (env->mem_io_pc) {
956
                    /* now we have a real cpu fault */
957
                    current_tb = tb_find_pc(env->mem_io_pc);
958
                }
959
            }
960
            if (current_tb == tb &&
961
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
962
                /* If we are modifying the current TB, we must stop
963
                its execution. We could be more precise by checking
964
                that the modification is after the current PC, but it
965
                would require a specialized function to partially
966
                restore the CPU state */
967

    
968
                current_tb_modified = 1;
969
                cpu_restore_state(current_tb, env,
970
                                  env->mem_io_pc, NULL);
971
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
972
                                     &current_flags);
973
            }
974
#endif /* TARGET_HAS_PRECISE_SMC */
975
            /* we need to do that to handle the case where a signal
976
               occurs while doing tb_phys_invalidate() */
977
            saved_tb = NULL;
978
            if (env) {
979
                saved_tb = env->current_tb;
980
                env->current_tb = NULL;
981
            }
982
            tb_phys_invalidate(tb, -1);
983
            if (env) {
984
                env->current_tb = saved_tb;
985
                if (env->interrupt_request && env->current_tb)
986
                    cpu_interrupt(env, env->interrupt_request);
987
            }
988
        }
989
        tb = tb_next;
990
    }
991
#if !defined(CONFIG_USER_ONLY)
992
    /* if no code remaining, no need to continue to use slow writes */
993
    if (!p->first_tb) {
994
        invalidate_page_bitmap(p);
995
        if (is_cpu_write_access) {
996
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
997
        }
998
    }
999
#endif
1000
#ifdef TARGET_HAS_PRECISE_SMC
1001
    if (current_tb_modified) {
1002
        /* we generate a block containing just the instruction
1003
           modifying the memory. It will ensure that it cannot modify
1004
           itself */
1005
        env->current_tb = NULL;
1006
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1007
        cpu_resume_from_signal(env, NULL);
1008
    }
1009
#endif
1010
}
1011

    
1012
/* len must be <= 8 and start must be a multiple of len */
1013
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1014
{
1015
    PageDesc *p;
1016
    int offset, b;
1017
#if 0
1018
    if (1) {
1019
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1020
                  cpu_single_env->mem_io_vaddr, len,
1021
                  cpu_single_env->eip,
1022
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1023
    }
1024
#endif
1025
    p = page_find(start >> TARGET_PAGE_BITS);
1026
    if (!p)
1027
        return;
1028
    if (p->code_bitmap) {
1029
        offset = start & ~TARGET_PAGE_MASK;
1030
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1031
        if (b & ((1 << len) - 1))
1032
            goto do_invalidate;
1033
    } else {
1034
    do_invalidate:
1035
        tb_invalidate_phys_page_range(start, start + len, 1);
1036
    }
1037
}
1038

    
1039
#if !defined(CONFIG_SOFTMMU)
1040
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1041
                                    unsigned long pc, void *puc)
1042
{
1043
    TranslationBlock *tb;
1044
    PageDesc *p;
1045
    int n;
1046
#ifdef TARGET_HAS_PRECISE_SMC
1047
    TranslationBlock *current_tb = NULL;
1048
    CPUState *env = cpu_single_env;
1049
    int current_tb_modified = 0;
1050
    target_ulong current_pc = 0;
1051
    target_ulong current_cs_base = 0;
1052
    int current_flags = 0;
1053
#endif
1054

    
1055
    addr &= TARGET_PAGE_MASK;
1056
    p = page_find(addr >> TARGET_PAGE_BITS);
1057
    if (!p)
1058
        return;
1059
    tb = p->first_tb;
1060
#ifdef TARGET_HAS_PRECISE_SMC
1061
    if (tb && pc != 0) {
1062
        current_tb = tb_find_pc(pc);
1063
    }
1064
#endif
1065
    while (tb != NULL) {
1066
        n = (long)tb & 3;
1067
        tb = (TranslationBlock *)((long)tb & ~3);
1068
#ifdef TARGET_HAS_PRECISE_SMC
1069
        if (current_tb == tb &&
1070
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1071
                /* If we are modifying the current TB, we must stop
1072
                   its execution. We could be more precise by checking
1073
                   that the modification is after the current PC, but it
1074
                   would require a specialized function to partially
1075
                   restore the CPU state */
1076

    
1077
            current_tb_modified = 1;
1078
            cpu_restore_state(current_tb, env, pc, puc);
1079
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1080
                                 &current_flags);
1081
        }
1082
#endif /* TARGET_HAS_PRECISE_SMC */
1083
        tb_phys_invalidate(tb, addr);
1084
        tb = tb->page_next[n];
1085
    }
1086
    p->first_tb = NULL;
1087
#ifdef TARGET_HAS_PRECISE_SMC
1088
    if (current_tb_modified) {
1089
        /* we generate a block containing just the instruction
1090
           modifying the memory. It will ensure that it cannot modify
1091
           itself */
1092
        env->current_tb = NULL;
1093
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1094
        cpu_resume_from_signal(env, puc);
1095
    }
1096
#endif
1097
}
1098
#endif
1099

    
1100
/* add the tb in the target page and protect it if necessary */
1101
static inline void tb_alloc_page(TranslationBlock *tb,
1102
                                 unsigned int n, target_ulong page_addr)
1103
{
1104
    PageDesc *p;
1105
    TranslationBlock *last_first_tb;
1106

    
1107
    tb->page_addr[n] = page_addr;
1108
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1109
    tb->page_next[n] = p->first_tb;
1110
    last_first_tb = p->first_tb;
1111
    p->first_tb = (TranslationBlock *)((long)tb | n);
1112
    invalidate_page_bitmap(p);
1113

    
1114
#if defined(TARGET_HAS_SMC) || 1
1115

    
1116
#if defined(CONFIG_USER_ONLY)
1117
    if (p->flags & PAGE_WRITE) {
1118
        target_ulong addr;
1119
        PageDesc *p2;
1120
        int prot;
1121

    
1122
        /* force the host page as non writable (writes will have a
1123
           page fault + mprotect overhead) */
1124
        page_addr &= qemu_host_page_mask;
1125
        prot = 0;
1126
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1127
            addr += TARGET_PAGE_SIZE) {
1128

    
1129
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1130
            if (!p2)
1131
                continue;
1132
            prot |= p2->flags;
1133
            p2->flags &= ~PAGE_WRITE;
1134
            page_get_flags(addr);
1135
          }
1136
        mprotect(g2h(page_addr), qemu_host_page_size,
1137
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1138
#ifdef DEBUG_TB_INVALIDATE
1139
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1140
               page_addr);
1141
#endif
1142
    }
1143
#else
1144
    /* if some code is already present, then the pages are already
1145
       protected. So we handle the case where only the first TB is
1146
       allocated in a physical page */
1147
    if (!last_first_tb) {
1148
        tlb_protect_code(page_addr);
1149
    }
1150
#endif
1151

    
1152
#endif /* TARGET_HAS_SMC */
1153
}
1154

    
1155
/* Allocate a new translation block. Flush the translation buffer if
1156
   too many translation blocks or too much generated code. */
1157
TranslationBlock *tb_alloc(target_ulong pc)
1158
{
1159
    TranslationBlock *tb;
1160

    
1161
    if (nb_tbs >= code_gen_max_blocks ||
1162
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1163
        return NULL;
1164
    tb = &tbs[nb_tbs++];
1165
    tb->pc = pc;
1166
    tb->cflags = 0;
1167
    return tb;
1168
}
1169

    
1170
void tb_free(TranslationBlock *tb)
1171
{
1172
    /* In practice this is mostly used for single use temporary TB
1173
       Ignore the hard cases and just back up if this TB happens to
1174
       be the last one generated.  */
1175
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1176
        code_gen_ptr = tb->tc_ptr;
1177
        nb_tbs--;
1178
    }
1179
}
1180

    
1181
/* add a new TB and link it to the physical page tables. phys_page2 is
1182
   (-1) to indicate that only one page contains the TB. */
1183
void tb_link_phys(TranslationBlock *tb,
1184
                  target_ulong phys_pc, target_ulong phys_page2)
1185
{
1186
    unsigned int h;
1187
    TranslationBlock **ptb;
1188

    
1189
    /* Grab the mmap lock to stop another thread invalidating this TB
1190
       before we are done.  */
1191
    mmap_lock();
1192
    /* add in the physical hash table */
1193
    h = tb_phys_hash_func(phys_pc);
1194
    ptb = &tb_phys_hash[h];
1195
    tb->phys_hash_next = *ptb;
1196
    *ptb = tb;
1197

    
1198
    /* add in the page list */
1199
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1200
    if (phys_page2 != -1)
1201
        tb_alloc_page(tb, 1, phys_page2);
1202
    else
1203
        tb->page_addr[1] = -1;
1204

    
1205
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1206
    tb->jmp_next[0] = NULL;
1207
    tb->jmp_next[1] = NULL;
1208

    
1209
    /* init original jump addresses */
1210
    if (tb->tb_next_offset[0] != 0xffff)
1211
        tb_reset_jump(tb, 0);
1212
    if (tb->tb_next_offset[1] != 0xffff)
1213
        tb_reset_jump(tb, 1);
1214

    
1215
#ifdef DEBUG_TB_CHECK
1216
    tb_page_check();
1217
#endif
1218
    mmap_unlock();
1219
}
1220

    
1221
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1222
   tb[1].tc_ptr. Return NULL if not found */
1223
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1224
{
1225
    int m_min, m_max, m;
1226
    unsigned long v;
1227
    TranslationBlock *tb;
1228

    
1229
    if (nb_tbs <= 0)
1230
        return NULL;
1231
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1232
        tc_ptr >= (unsigned long)code_gen_ptr)
1233
        return NULL;
1234
    /* binary search (cf Knuth) */
1235
    m_min = 0;
1236
    m_max = nb_tbs - 1;
1237
    while (m_min <= m_max) {
1238
        m = (m_min + m_max) >> 1;
1239
        tb = &tbs[m];
1240
        v = (unsigned long)tb->tc_ptr;
1241
        if (v == tc_ptr)
1242
            return tb;
1243
        else if (tc_ptr < v) {
1244
            m_max = m - 1;
1245
        } else {
1246
            m_min = m + 1;
1247
        }
1248
    }
1249
    return &tbs[m_max];
1250
}
1251

    
1252
static void tb_reset_jump_recursive(TranslationBlock *tb);
1253

    
1254
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1255
{
1256
    TranslationBlock *tb1, *tb_next, **ptb;
1257
    unsigned int n1;
1258

    
1259
    tb1 = tb->jmp_next[n];
1260
    if (tb1 != NULL) {
1261
        /* find head of list */
1262
        for(;;) {
1263
            n1 = (long)tb1 & 3;
1264
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1265
            if (n1 == 2)
1266
                break;
1267
            tb1 = tb1->jmp_next[n1];
1268
        }
1269
        /* we are now sure now that tb jumps to tb1 */
1270
        tb_next = tb1;
1271

    
1272
        /* remove tb from the jmp_first list */
1273
        ptb = &tb_next->jmp_first;
1274
        for(;;) {
1275
            tb1 = *ptb;
1276
            n1 = (long)tb1 & 3;
1277
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1278
            if (n1 == n && tb1 == tb)
1279
                break;
1280
            ptb = &tb1->jmp_next[n1];
1281
        }
1282
        *ptb = tb->jmp_next[n];
1283
        tb->jmp_next[n] = NULL;
1284

    
1285
        /* suppress the jump to next tb in generated code */
1286
        tb_reset_jump(tb, n);
1287

    
1288
        /* suppress jumps in the tb on which we could have jumped */
1289
        tb_reset_jump_recursive(tb_next);
1290
    }
1291
}
1292

    
1293
static void tb_reset_jump_recursive(TranslationBlock *tb)
1294
{
1295
    tb_reset_jump_recursive2(tb, 0);
1296
    tb_reset_jump_recursive2(tb, 1);
1297
}
1298

    
1299
#if defined(TARGET_HAS_ICE)
1300
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1301
{
1302
    target_phys_addr_t addr;
1303
    target_ulong pd;
1304
    ram_addr_t ram_addr;
1305
    PhysPageDesc *p;
1306

    
1307
    addr = cpu_get_phys_page_debug(env, pc);
1308
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1309
    if (!p) {
1310
        pd = IO_MEM_UNASSIGNED;
1311
    } else {
1312
        pd = p->phys_offset;
1313
    }
1314
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1315
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1316
}
1317
#endif
1318

    
1319
/* Add a watchpoint.  */
1320
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1321
                          int flags, CPUWatchpoint **watchpoint)
1322
{
1323
    target_ulong len_mask = ~(len - 1);
1324
    CPUWatchpoint *wp;
1325

    
1326
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1327
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1328
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1329
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1330
        return -EINVAL;
1331
    }
1332
    wp = qemu_malloc(sizeof(*wp));
1333

    
1334
    wp->vaddr = addr;
1335
    wp->len_mask = len_mask;
1336
    wp->flags = flags;
1337

    
1338
    /* keep all GDB-injected watchpoints in front */
1339
    if (flags & BP_GDB)
1340
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1341
    else
1342
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1343

    
1344
    tlb_flush_page(env, addr);
1345

    
1346
    if (watchpoint)
1347
        *watchpoint = wp;
1348
    return 0;
1349
}
1350

    
1351
/* Remove a specific watchpoint.  */
1352
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1353
                          int flags)
1354
{
1355
    target_ulong len_mask = ~(len - 1);
1356
    CPUWatchpoint *wp;
1357

    
1358
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1359
        if (addr == wp->vaddr && len_mask == wp->len_mask
1360
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1361
            cpu_watchpoint_remove_by_ref(env, wp);
1362
            return 0;
1363
        }
1364
    }
1365
    return -ENOENT;
1366
}
1367

    
1368
/* Remove a specific watchpoint by reference.  */
1369
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1370
{
1371
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1372

    
1373
    tlb_flush_page(env, watchpoint->vaddr);
1374

    
1375
    qemu_free(watchpoint);
1376
}
1377

    
1378
/* Remove all matching watchpoints.  */
1379
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1380
{
1381
    CPUWatchpoint *wp, *next;
1382

    
1383
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1384
        if (wp->flags & mask)
1385
            cpu_watchpoint_remove_by_ref(env, wp);
1386
    }
1387
}
1388

    
1389
/* Add a breakpoint.  */
1390
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1391
                          CPUBreakpoint **breakpoint)
1392
{
1393
#if defined(TARGET_HAS_ICE)
1394
    CPUBreakpoint *bp;
1395

    
1396
    bp = qemu_malloc(sizeof(*bp));
1397

    
1398
    bp->pc = pc;
1399
    bp->flags = flags;
1400

    
1401
    /* keep all GDB-injected breakpoints in front */
1402
    if (flags & BP_GDB)
1403
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1404
    else
1405
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1406

    
1407
    breakpoint_invalidate(env, pc);
1408

    
1409
    if (breakpoint)
1410
        *breakpoint = bp;
1411
    return 0;
1412
#else
1413
    return -ENOSYS;
1414
#endif
1415
}
1416

    
1417
/* Remove a specific breakpoint.  */
1418
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1419
{
1420
#if defined(TARGET_HAS_ICE)
1421
    CPUBreakpoint *bp;
1422

    
1423
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1424
        if (bp->pc == pc && bp->flags == flags) {
1425
            cpu_breakpoint_remove_by_ref(env, bp);
1426
            return 0;
1427
        }
1428
    }
1429
    return -ENOENT;
1430
#else
1431
    return -ENOSYS;
1432
#endif
1433
}
1434

    
1435
/* Remove a specific breakpoint by reference.  */
1436
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1437
{
1438
#if defined(TARGET_HAS_ICE)
1439
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1440

    
1441
    breakpoint_invalidate(env, breakpoint->pc);
1442

    
1443
    qemu_free(breakpoint);
1444
#endif
1445
}
1446

    
1447
/* Remove all matching breakpoints. */
1448
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1449
{
1450
#if defined(TARGET_HAS_ICE)
1451
    CPUBreakpoint *bp, *next;
1452

    
1453
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1454
        if (bp->flags & mask)
1455
            cpu_breakpoint_remove_by_ref(env, bp);
1456
    }
1457
#endif
1458
}
1459

    
1460
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1461
   CPU loop after each instruction */
1462
void cpu_single_step(CPUState *env, int enabled)
1463
{
1464
#if defined(TARGET_HAS_ICE)
1465
    if (env->singlestep_enabled != enabled) {
1466
        env->singlestep_enabled = enabled;
1467
        if (kvm_enabled())
1468
            kvm_update_guest_debug(env, 0);
1469
        else {
1470
            /* must flush all the translated code to avoid inconsistencies */
1471
            /* XXX: only flush what is necessary */
1472
            tb_flush(env);
1473
        }
1474
    }
1475
#endif
1476
}
1477

    
1478
/* enable or disable low levels log */
1479
void cpu_set_log(int log_flags)
1480
{
1481
    loglevel = log_flags;
1482
    if (loglevel && !logfile) {
1483
        logfile = fopen(logfilename, log_append ? "a" : "w");
1484
        if (!logfile) {
1485
            perror(logfilename);
1486
            _exit(1);
1487
        }
1488
#if !defined(CONFIG_SOFTMMU)
1489
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1490
        {
1491
            static char logfile_buf[4096];
1492
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1493
        }
1494
#elif !defined(_WIN32)
1495
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1496
        setvbuf(logfile, NULL, _IOLBF, 0);
1497
#endif
1498
        log_append = 1;
1499
    }
1500
    if (!loglevel && logfile) {
1501
        fclose(logfile);
1502
        logfile = NULL;
1503
    }
1504
}
1505

    
1506
void cpu_set_log_filename(const char *filename)
1507
{
1508
    logfilename = strdup(filename);
1509
    if (logfile) {
1510
        fclose(logfile);
1511
        logfile = NULL;
1512
    }
1513
    cpu_set_log(loglevel);
1514
}
1515

    
1516
static void cpu_unlink_tb(CPUState *env)
1517
{
1518
#if defined(CONFIG_USE_NPTL)
1519
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1520
       problem and hope the cpu will stop of its own accord.  For userspace
1521
       emulation this often isn't actually as bad as it sounds.  Often
1522
       signals are used primarily to interrupt blocking syscalls.  */
1523
#else
1524
    TranslationBlock *tb;
1525
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1526

    
1527
    tb = env->current_tb;
1528
    /* if the cpu is currently executing code, we must unlink it and
1529
       all the potentially executing TB */
1530
    if (tb && !testandset(&interrupt_lock)) {
1531
        env->current_tb = NULL;
1532
        tb_reset_jump_recursive(tb);
1533
        resetlock(&interrupt_lock);
1534
    }
1535
#endif
1536
}
1537

    
1538
/* mask must never be zero, except for A20 change call */
1539
void cpu_interrupt(CPUState *env, int mask)
1540
{
1541
    int old_mask;
1542

    
1543
    old_mask = env->interrupt_request;
1544
    env->interrupt_request |= mask;
1545

    
1546
#ifndef CONFIG_USER_ONLY
1547
    /*
1548
     * If called from iothread context, wake the target cpu in
1549
     * case its halted.
1550
     */
1551
    if (!qemu_cpu_self(env)) {
1552
        qemu_cpu_kick(env);
1553
        return;
1554
    }
1555
#endif
1556

    
1557
    if (use_icount) {
1558
        env->icount_decr.u16.high = 0xffff;
1559
#ifndef CONFIG_USER_ONLY
1560
        if (!can_do_io(env)
1561
            && (mask & ~old_mask) != 0) {
1562
            cpu_abort(env, "Raised interrupt while not in I/O function");
1563
        }
1564
#endif
1565
    } else {
1566
        cpu_unlink_tb(env);
1567
    }
1568
}
1569

    
1570
void cpu_reset_interrupt(CPUState *env, int mask)
1571
{
1572
    env->interrupt_request &= ~mask;
1573
}
1574

    
1575
void cpu_exit(CPUState *env)
1576
{
1577
    env->exit_request = 1;
1578
    cpu_unlink_tb(env);
1579
}
1580

    
1581
const CPULogItem cpu_log_items[] = {
1582
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1583
      "show generated host assembly code for each compiled TB" },
1584
    { CPU_LOG_TB_IN_ASM, "in_asm",
1585
      "show target assembly code for each compiled TB" },
1586
    { CPU_LOG_TB_OP, "op",
1587
      "show micro ops for each compiled TB" },
1588
    { CPU_LOG_TB_OP_OPT, "op_opt",
1589
      "show micro ops "
1590
#ifdef TARGET_I386
1591
      "before eflags optimization and "
1592
#endif
1593
      "after liveness analysis" },
1594
    { CPU_LOG_INT, "int",
1595
      "show interrupts/exceptions in short format" },
1596
    { CPU_LOG_EXEC, "exec",
1597
      "show trace before each executed TB (lots of logs)" },
1598
    { CPU_LOG_TB_CPU, "cpu",
1599
      "show CPU state before block translation" },
1600
#ifdef TARGET_I386
1601
    { CPU_LOG_PCALL, "pcall",
1602
      "show protected mode far calls/returns/exceptions" },
1603
    { CPU_LOG_RESET, "cpu_reset",
1604
      "show CPU state before CPU resets" },
1605
#endif
1606
#ifdef DEBUG_IOPORT
1607
    { CPU_LOG_IOPORT, "ioport",
1608
      "show all i/o ports accesses" },
1609
#endif
1610
    { 0, NULL, NULL },
1611
};
1612

    
1613
static int cmp1(const char *s1, int n, const char *s2)
1614
{
1615
    if (strlen(s2) != n)
1616
        return 0;
1617
    return memcmp(s1, s2, n) == 0;
1618
}
1619

    
1620
/* takes a comma separated list of log masks. Return 0 if error. */
1621
int cpu_str_to_log_mask(const char *str)
1622
{
1623
    const CPULogItem *item;
1624
    int mask;
1625
    const char *p, *p1;
1626

    
1627
    p = str;
1628
    mask = 0;
1629
    for(;;) {
1630
        p1 = strchr(p, ',');
1631
        if (!p1)
1632
            p1 = p + strlen(p);
1633
        if(cmp1(p,p1-p,"all")) {
1634
                for(item = cpu_log_items; item->mask != 0; item++) {
1635
                        mask |= item->mask;
1636
                }
1637
        } else {
1638
        for(item = cpu_log_items; item->mask != 0; item++) {
1639
            if (cmp1(p, p1 - p, item->name))
1640
                goto found;
1641
        }
1642
        return 0;
1643
        }
1644
    found:
1645
        mask |= item->mask;
1646
        if (*p1 != ',')
1647
            break;
1648
        p = p1 + 1;
1649
    }
1650
    return mask;
1651
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&new_env->breakpoints);
    TAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
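
/* Sketch (illustrative, not compiled in; the helper name is made up): what
   a caller that stores into guest RAM behind the TLB's back does with the
   per-page dirty bytes manipulated above.  This mirrors the RAM write path
   of cpu_physical_memory_rw() later in this file. */
#if 0
static void example_mark_ram_written(ram_addr_t addr1, int len)
{
    if (!cpu_physical_memory_is_dirty(addr1)) {
        /* the page may hold translated code: drop the affected TBs ... */
        tb_invalidate_phys_page_range(addr1, addr1 + len, 0);
        /* ... then mark every client dirty except the code tracker */
        phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |= (0xff & ~CODE_DIRTY_FLAG);
    }
}
#endif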

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    if (kvm_enabled()) {
        return kvm_set_migration_log(enable);
    }
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret = 0;

    if (kvm_enabled())
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
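
/* Sketch (illustrative, not compiled in) of how a softmmu load consumes the
   entry filled in above; the real fast path lives in the generated softmmu
   helpers, not here.  For a plain RAM page the host pointer is simply
   vaddr + addend; anything else (miss, TLB_MMIO, TLB_NOTDIRTY) is handled
   out of line, which may refill the entry through tlb_set_page_exec(). */
#if 0
static uint32_t example_ldl_fast(CPUState *env, target_ulong vaddr, int mmu_idx)
{
    unsigned int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    if ((vaddr & TARGET_PAGE_MASK) ==
            (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK)) &&
        !(te->addr_read & ~TARGET_PAGE_MASK)) {
        /* RAM hit: direct host access through the precomputed addend */
        return ldl_p((void *)(unsigned long)(vaddr + te->addend));
    }
    /* slow path: TLB refill or dispatch through env->iotlb */
    return 0;
}
#endif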

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    start = end = -1;
    prot = 0;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * "region" is one continuous chunk of memory
             * that has same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (p == NULL)
                break;
        }
    }
    return (rc);
}

static int dump_region(void *priv, unsigned long start,
    unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
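
/* Worked example for CHECK_SUBPAGE (illustrative; assumes 4 KB target
   pages): registering a 0x600 byte region at start_addr = 0x10000400
   visits the single page addr = 0x10000400.  Since addr is not greater
   than start_addr, start_addr2 = 0x400 (non-zero, so need_subpage); and
   (start_addr + orig_size) - addr = 0x600 < TARGET_PAGE_SIZE, so
   end_addr2 = 0x9ff (less than 0xfff, so need_subpage again).  The caller
   below then installs a subpage container and wires only offsets
   0x400..0x9ff of that page to the new region. */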

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
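
/* Example (illustrative, not compiled in; the addresses, sizes, callbacks
   and device state pointer are made up): typical board code backs one
   region with RAM from qemu_ram_alloc() and maps an MMIO range whose io
   index came from cpu_register_io_memory() further down in this file. */
#if 0
static void example_board_map(void *s, CPUReadMemoryFunc * const *mydev_read,
                              CPUWriteMemoryFunc * const *mydev_write)
{
    ram_addr_t ram_offset;
    int mmio_index;

    /* 1 MB of RAM at 0x80000000, backed by a fresh ram block */
    ram_offset = qemu_ram_alloc(0x00100000);
    cpu_register_physical_memory_offset(0x80000000, 0x00100000,
                                        ram_offset | IO_MEM_RAM, 0);

    /* one page of MMIO at 0x90000000, dispatched to the device callbacks */
    mmio_index = cpu_register_io_memory(mydev_read, mydev_write, s);
    cpu_register_physical_memory_offset(0x90000000, TARGET_PAGE_SIZE,
                                        mmio_index, 0);
}
#endif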

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

    new_block->host = qemu_vmalloc(size);
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }

    return -1;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
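
/* Example (illustrative, not compiled in; every "mydev" name is made up):
   a device supplies byte/word/long accessors exactly like subpage_read[]
   and subpage_write[] above and keeps the returned index for use with
   cpu_register_physical_memory_offset().  A NULL slot makes
   cpu_register_io_memory_fixed() tag the returned index with
   IO_MEM_SUBWIDTH, which the registration code earlier routes through a
   subpage container. */
#if 0
static CPUReadMemoryFunc * const mydev_read[3] = {
    NULL,              /* no byte reads */
    NULL,              /* no word reads */
    mydev_readl,       /* 32 bit reads only */
};

static CPUWriteMemoryFunc * const mydev_write[3] = {
    NULL,
    NULL,
    mydev_writel,
};

static void mydev_init(MyDevState *s)
{
    s->io_index = cpu_register_io_memory(mydev_read, mydev_write, s);
}
#endif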

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}
3257

    
3258
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

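/* Illustrative sketch (not part of the original file): the usual pattern for a
 * DMA-style user of cpu_physical_memory_map()/cpu_physical_memory_unmap().
 * The function and callback names, and the retry flag, are hypothetical; real
 * callers keep this state in their own request structure. */
static void example_dma_retry_cb(void *opaque)
{
    /* invoked from cpu_notify_map_clients() once the bounce buffer is free */
    int *need_retry = opaque;
    *need_retry = 1;
}

static void example_dma_read_from_guest(target_phys_addr_t addr,
                                        target_phys_addr_t len, int *need_retry)
{
    target_phys_addr_t plen = len;
    void *host;

    /* is_write == 0: we only read from the returned host pointer */
    host = cpu_physical_memory_map(addr, &plen, 0);
    if (!host) {
        /* mapping resources exhausted; keep the returned handle instead if the
           request may be cancelled and needs cpu_unregister_map_client() */
        cpu_register_map_client(need_retry, example_dma_retry_cb);
        return;
    }
    /* ... consume up to plen bytes at host (may be less than len) ... */
    cpu_physical_memory_unmap(host, plen, 0, plen);
}
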
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

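/* Illustrative sketch (not part of the original file): stl_phys_notdirty() is
 * meant for target MMU helpers that update accessed/dirty bits in a guest PTE
 * during a page walk.  The flag value below is a made-up stand-in for the
 * target-specific bit. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    const uint32_t accessed_bit = 0x20; /* hypothetical PTE accessed flag */
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & accessed_bit)) {
        /* write back without flagging the RAM page dirty or invalidating TBs */
        stl_phys_notdirty(pte_addr, pte | accessed_bit);
    }
}
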
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

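/* Illustrative sketch (not part of the original file): how a debug stub might
 * read a NUL-terminated guest string at a virtual address through
 * cpu_memory_rw_debug().  The helper name is hypothetical; env is whichever
 * CPU the debugger is inspecting.  Returns 0 on success, -1 on an unmapped
 * page. */
static int example_debug_read_string(CPUState *env, target_ulong vaddr,
                                     char *out, int out_size)
{
    int i;

    for (i = 0; i < out_size - 1; i++) {
        uint8_t byte;

        /* the access goes through the guest MMU, one byte at a time */
        if (cpu_memory_rw_debug(env, vaddr + i, &byte, 1, 0) != 0)
            return -1;
        out[i] = byte;
        if (byte == 0)
            return 0;
    }
    out[i] = 0;
    return 0;
}
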
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

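/* Illustrative sketch (not part of the original file): dump_exec_info() only
 * needs an fprintf-like callback, so a quick debugging dump of the translation
 * buffer statistics can pass the C library's fprintf directly. */
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
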
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif