
root / exec.c @ dc828ca1

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37
#include "qemu-common.h"
38
#include "tcg.h"
39
#include "hw/hw.h"
40
#include "osdep.h"
41
#include "kvm.h"
42
#if defined(CONFIG_USER_ONLY)
43
#include <qemu.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81
#endif
82

    
83
static TranslationBlock *tbs;
84
int code_gen_max_blocks;
85
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86
static int nb_tbs;
87
/* any access to the tbs or the page table must use this lock */
88
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89

    
90
#if defined(__arm__) || defined(__sparc_v9__)
91
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
94
#define code_gen_section                                \
95
    __attribute__((__section__(".gen_code")))           \
96
    __attribute__((aligned (32)))
97
#else
98
#define code_gen_section                                \
99
    __attribute__((aligned (32)))
100
#endif
101

    
102
uint8_t code_gen_prologue[1024] code_gen_section;
103
static uint8_t *code_gen_buffer;
104
static unsigned long code_gen_buffer_size;
105
/* threshold to flush the translated code buffer */
106
static unsigned long code_gen_buffer_max_size;
107
uint8_t *code_gen_ptr;
108

    
109
#if !defined(CONFIG_USER_ONLY)
110
ram_addr_t phys_ram_size;
111
int phys_ram_fd;
112
uint8_t *phys_ram_base;
113
uint8_t *phys_ram_dirty;
114
static int in_migration;
115
static ram_addr_t phys_ram_alloc_offset = 0;
116
#endif
117

    
118
CPUState *first_cpu;
119
/* current CPU in the current thread. It is only valid inside
120
   cpu_exec() */
121
CPUState *cpu_single_env;
122
/* 0 = Do not count executed instructions.
123
   1 = Precise instruction counting.
124
   2 = Adaptive rate instruction counting.  */
125
int use_icount = 0;
126
/* Current instruction counter.  While executing translated code this may
127
   include some instructions that have not yet been executed.  */
128
int64_t qemu_icount;
129

    
130
typedef struct PageDesc {
131
    /* list of TBs intersecting this ram page */
132
    TranslationBlock *first_tb;
133
    /* in order to optimize self modifying code, we count the number
134
       of lookups we do to a given page to use a bitmap */
135
    unsigned int code_write_count;
136
    uint8_t *code_bitmap;
137
#if defined(CONFIG_USER_ONLY)
138
    unsigned long flags;
139
#endif
140
} PageDesc;
141

    
142
typedef struct PhysPageDesc {
143
    /* offset in host memory of the page + io_index in the low bits */
144
    ram_addr_t phys_offset;
145
    ram_addr_t region_offset;
146
} PhysPageDesc;
147

    
148
#define L2_BITS 10
149
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150
/* XXX: this is a temporary hack for alpha target.
151
 *      In the future, this is to be replaced by a multi-level table
152
 *      to actually be able to handle the complete 64 bits address space.
153
 */
154
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
155
#else
156
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
157
#endif
158

    
159
#define L1_SIZE (1 << L1_BITS)
160
#define L2_SIZE (1 << L2_BITS)
161

    
162
unsigned long qemu_real_host_page_size;
163
unsigned long qemu_host_page_bits;
164
unsigned long qemu_host_page_size;
165
unsigned long qemu_host_page_mask;
166

    
167
/* XXX: for system emulation, it could just be an array */
168
static PageDesc *l1_map[L1_SIZE];
169
static PhysPageDesc **l1_phys_map;
170

    
171
#if !defined(CONFIG_USER_ONLY)
172
static void io_mem_init(void);
173

    
174
/* io memory support */
175
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
177
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
178
static char io_mem_used[IO_MEM_NB_ENTRIES];
179
static int io_mem_watch;
180
#endif
181

    
182
/* log support */
183
static const char *logfilename = "/tmp/qemu.log";
184
FILE *logfile;
185
int loglevel;
186
static int log_append = 0;
187

    
188
/* statistics */
189
static int tlb_flush_count;
190
static int tb_flush_count;
191
static int tb_phys_invalidate_count;
192

    
193
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194
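/* A subpage splits one target page into byte-granular regions, each with
   its own read/write handlers and opaque pointer, so that several I/O
   memory regions smaller than TARGET_PAGE_SIZE can share a single page. */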
typedef struct subpage_t {
195
    target_phys_addr_t base;
196
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198
    void *opaque[TARGET_PAGE_SIZE][2][4];
199
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
200
} subpage_t;
201

    
202
#ifdef _WIN32
203
static void map_exec(void *addr, long size)
204
{
205
    DWORD old_protect;
206
    VirtualProtect(addr, size,
207
                   PAGE_EXECUTE_READWRITE, &old_protect);
208
    
209
}
210
#else
211
static void map_exec(void *addr, long size)
212
{
213
    unsigned long start, end, page_size;
214
    
215
    page_size = getpagesize();
216
    start = (unsigned long)addr;
217
    start &= ~(page_size - 1);
218
    
219
    end = (unsigned long)addr + size;
220
    end += page_size - 1;
221
    end &= ~(page_size - 1);
222
    
223
    mprotect((void *)start, end - start,
224
             PROT_READ | PROT_WRITE | PROT_EXEC);
225
}
226
#endif
227

    
228
static void page_init(void)
229
{
230
    /* NOTE: we can always suppose that qemu_host_page_size >=
231
       TARGET_PAGE_SIZE */
232
#ifdef _WIN32
233
    {
234
        SYSTEM_INFO system_info;
235

    
236
        GetSystemInfo(&system_info);
237
        qemu_real_host_page_size = system_info.dwPageSize;
238
    }
239
#else
240
    qemu_real_host_page_size = getpagesize();
241
#endif
242
    if (qemu_host_page_size == 0)
243
        qemu_host_page_size = qemu_real_host_page_size;
244
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
245
        qemu_host_page_size = TARGET_PAGE_SIZE;
246
    qemu_host_page_bits = 0;
247
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248
        qemu_host_page_bits++;
249
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
250
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
252

    
253
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
254
    {
255
        long long startaddr, endaddr;
256
        FILE *f;
257
        int n;
258

    
259
        mmap_lock();
260
        last_brk = (unsigned long)sbrk(0);
261
        f = fopen("/proc/self/maps", "r");
262
        if (f) {
263
            do {
264
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
265
                if (n == 2) {
266
                    startaddr = MIN(startaddr,
267
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268
                    endaddr = MIN(endaddr,
269
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
271
                                   TARGET_PAGE_ALIGN(endaddr),
272
                                   PAGE_RESERVED); 
273
                }
274
            } while (!feof(f));
275
            fclose(f);
276
        }
277
        mmap_unlock();
278
    }
279
#endif
280
}
281

    
282
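/* The PageDesc table is a two-level structure: l1_map is indexed by the
   page index shifted right by L2_BITS, and each entry points to an array
   of L2_SIZE PageDesc structures indexed by the low L2_BITS of the index. */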
static inline PageDesc **page_l1_map(target_ulong index)
283
{
284
#if TARGET_LONG_BITS > 32
285
    /* Host memory outside guest VM.  For 32-bit targets we have already
286
       excluded high addresses.  */
287
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
288
        return NULL;
289
#endif
290
    return &l1_map[index >> L2_BITS];
291
}
292

    
293
static inline PageDesc *page_find_alloc(target_ulong index)
294
{
295
    PageDesc **lp, *p;
296
    lp = page_l1_map(index);
297
    if (!lp)
298
        return NULL;
299

    
300
    p = *lp;
301
    if (!p) {
302
        /* allocate if not found */
303
#if defined(CONFIG_USER_ONLY)
304
        size_t len = sizeof(PageDesc) * L2_SIZE;
305
        /* Don't use qemu_malloc because it may recurse.  */
306
        p = mmap(0, len, PROT_READ | PROT_WRITE,
307
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
308
        *lp = p;
309
        if (h2g_valid(p)) {
310
            unsigned long addr = h2g(p);
311
            page_set_flags(addr & TARGET_PAGE_MASK,
312
                           TARGET_PAGE_ALIGN(addr + len),
313
                           PAGE_RESERVED); 
314
        }
315
#else
316
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
317
        *lp = p;
318
#endif
319
    }
320
    return p + (index & (L2_SIZE - 1));
321
}
322

    
323
static inline PageDesc *page_find(target_ulong index)
324
{
325
    PageDesc **lp, *p;
326
    lp = page_l1_map(index);
327
    if (!lp)
328
        return NULL;
329

    
330
    p = *lp;
331
    if (!p)
332
        return 0;
333
    return p + (index & (L2_SIZE - 1));
334
}
335

    
336
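/* Look up (and optionally allocate) the PhysPageDesc for a physical page
   index.  For physical address spaces wider than 32 bits an extra level,
   indexed by the topmost bits, sits above the L1/L2 tables; if 'alloc' is
   zero and an intermediate table is missing, NULL is returned. */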
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
337
{
338
    void **lp, **p;
339
    PhysPageDesc *pd;
340

    
341
    p = (void **)l1_phys_map;
342
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
343

    
344
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
345
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
346
#endif
347
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
348
    p = *lp;
349
    if (!p) {
350
        /* allocate if not found */
351
        if (!alloc)
352
            return NULL;
353
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
354
        memset(p, 0, sizeof(void *) * L1_SIZE);
355
        *lp = p;
356
    }
357
#endif
358
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
359
    pd = *lp;
360
    if (!pd) {
361
        int i;
362
        /* allocate if not found */
363
        if (!alloc)
364
            return NULL;
365
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
366
        *lp = pd;
367
        for (i = 0; i < L2_SIZE; i++) {
368
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
370
        }
371
    }
372
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
373
}
374

    
375
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
376
{
377
    return phys_page_find_alloc(index, 0);
378
}
379

    
380
#if !defined(CONFIG_USER_ONLY)
381
static void tlb_protect_code(ram_addr_t ram_addr);
382
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
383
                                    target_ulong vaddr);
384
#define mmap_lock() do { } while(0)
385
#define mmap_unlock() do { } while(0)
386
#endif
387

    
388
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
389

    
390
#if defined(CONFIG_USER_ONLY)
391
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
393
#define USE_STATIC_CODE_GEN_BUFFER
394
#endif
395

    
396
#ifdef USE_STATIC_CODE_GEN_BUFFER
397
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
398
#endif
399

    
400
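/* Allocate the buffer that holds the generated host code: either the
   static array above or an mmap'ed executable region whose placement is
   constrained on some hosts so that direct branches can reach it. */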
static void code_gen_alloc(unsigned long tb_size)
401
{
402
#ifdef USE_STATIC_CODE_GEN_BUFFER
403
    code_gen_buffer = static_code_gen_buffer;
404
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
405
    map_exec(code_gen_buffer, code_gen_buffer_size);
406
#else
407
    code_gen_buffer_size = tb_size;
408
    if (code_gen_buffer_size == 0) {
409
#if defined(CONFIG_USER_ONLY)
410
        /* in user mode, phys_ram_size is not meaningful */
411
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
412
#else
413
        /* XXX: needs adjustments */
414
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
415
#endif
416
    }
417
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
418
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
419
    /* The code gen buffer location may have constraints depending on
420
       the host cpu and OS */
421
#if defined(__linux__) 
422
    {
423
        int flags;
424
        void *start = NULL;
425

    
426
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
427
#if defined(__x86_64__)
428
        flags |= MAP_32BIT;
429
        /* Cannot map more than that */
430
        if (code_gen_buffer_size > (800 * 1024 * 1024))
431
            code_gen_buffer_size = (800 * 1024 * 1024);
432
#elif defined(__sparc_v9__)
433
        // Map the buffer below 2G, so we can use direct calls and branches
434
        flags |= MAP_FIXED;
435
        start = (void *) 0x60000000UL;
436
        if (code_gen_buffer_size > (512 * 1024 * 1024))
437
            code_gen_buffer_size = (512 * 1024 * 1024);
438
#elif defined(__arm__)
439
        /* Map the buffer below 32M, so we can use direct calls and branches */
440
        flags |= MAP_FIXED;
441
        start = (void *) 0x01000000UL;
442
        if (code_gen_buffer_size > 16 * 1024 * 1024)
443
            code_gen_buffer_size = 16 * 1024 * 1024;
444
#endif
445
        code_gen_buffer = mmap(start, code_gen_buffer_size,
446
                               PROT_WRITE | PROT_READ | PROT_EXEC,
447
                               flags, -1, 0);
448
        if (code_gen_buffer == MAP_FAILED) {
449
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
450
            exit(1);
451
        }
452
    }
453
#elif defined(__FreeBSD__) || defined(__DragonFly__)
454
    {
455
        int flags;
456
        void *addr = NULL;
457
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
458
#if defined(__x86_64__)
459
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
460
         * 0x40000000 is free */
461
        flags |= MAP_FIXED;
462
        addr = (void *)0x40000000;
463
        /* Cannot map more than that */
464
        if (code_gen_buffer_size > (800 * 1024 * 1024))
465
            code_gen_buffer_size = (800 * 1024 * 1024);
466
#endif
467
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
468
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
469
                               flags, -1, 0);
470
        if (code_gen_buffer == MAP_FAILED) {
471
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
472
            exit(1);
473
        }
474
    }
475
#else
476
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
477
    map_exec(code_gen_buffer, code_gen_buffer_size);
478
#endif
479
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
480
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
481
    code_gen_buffer_max_size = code_gen_buffer_size - 
482
        code_gen_max_block_size();
483
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
484
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
485
}
486

    
487
/* Must be called before using the QEMU cpus. 'tb_size' is the size
488
   (in bytes) allocated to the translation buffer. Zero means default
489
   size. */
490
void cpu_exec_init_all(unsigned long tb_size)
491
{
492
    cpu_gen_init();
493
    code_gen_alloc(tb_size);
494
    code_gen_ptr = code_gen_buffer;
495
    page_init();
496
#if !defined(CONFIG_USER_ONLY)
497
    io_mem_init();
498
#endif
499
}
500

    
501
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
502

    
503
#define CPU_COMMON_SAVE_VERSION 1
504

    
505
static void cpu_common_save(QEMUFile *f, void *opaque)
506
{
507
    CPUState *env = opaque;
508

    
509
    qemu_put_be32s(f, &env->halted);
510
    qemu_put_be32s(f, &env->interrupt_request);
511
}
512

    
513
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
514
{
515
    CPUState *env = opaque;
516

    
517
    if (version_id != CPU_COMMON_SAVE_VERSION)
518
        return -EINVAL;
519

    
520
    qemu_get_be32s(f, &env->halted);
521
    qemu_get_be32s(f, &env->interrupt_request);
522
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
523
       version_id is increased. */
524
    env->interrupt_request &= ~0x01;
525
    tlb_flush(env, 1);
526

    
527
    return 0;
528
}
529
#endif
530

    
531
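/* Register a new CPU: append it to the global first_cpu list, assign the
   next free cpu_index and, for the system emulator, register the savevm
   handlers for its state. */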
void cpu_exec_init(CPUState *env)
532
{
533
    CPUState **penv;
534
    int cpu_index;
535

    
536
#if defined(CONFIG_USER_ONLY)
537
    cpu_list_lock();
538
#endif
539
    env->next_cpu = NULL;
540
    penv = &first_cpu;
541
    cpu_index = 0;
542
    while (*penv != NULL) {
543
        penv = (CPUState **)&(*penv)->next_cpu;
544
        cpu_index++;
545
    }
546
    env->cpu_index = cpu_index;
547
    TAILQ_INIT(&env->breakpoints);
548
    TAILQ_INIT(&env->watchpoints);
549
    *penv = env;
550
#if defined(CONFIG_USER_ONLY)
551
    cpu_list_unlock();
552
#endif
553
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
554
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
555
                    cpu_common_save, cpu_common_load, env);
556
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
557
                    cpu_save, cpu_load, env);
558
#endif
559
}
560

    
561
static inline void invalidate_page_bitmap(PageDesc *p)
562
{
563
    if (p->code_bitmap) {
564
        qemu_free(p->code_bitmap);
565
        p->code_bitmap = NULL;
566
    }
567
    p->code_write_count = 0;
568
}
569

    
570
/* set the 'first_tb' field of every PageDesc to NULL */
571
static void page_flush_tb(void)
572
{
573
    int i, j;
574
    PageDesc *p;
575

    
576
    for(i = 0; i < L1_SIZE; i++) {
577
        p = l1_map[i];
578
        if (p) {
579
            for(j = 0; j < L2_SIZE; j++) {
580
                p->first_tb = NULL;
581
                invalidate_page_bitmap(p);
582
                p++;
583
            }
584
        }
585
    }
586
}
587

    
588
/* flush all the translation blocks */
589
/* XXX: tb_flush is currently not thread safe */
590
void tb_flush(CPUState *env1)
591
{
592
    CPUState *env;
593
#if defined(DEBUG_FLUSH)
594
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
595
           (unsigned long)(code_gen_ptr - code_gen_buffer),
596
           nb_tbs, nb_tbs > 0 ?
597
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
598
#endif
599
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
600
        cpu_abort(env1, "Internal error: code buffer overflow\n");
601

    
602
    nb_tbs = 0;
603

    
604
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
605
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
606
    }
607

    
608
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
609
    page_flush_tb();
610

    
611
    code_gen_ptr = code_gen_buffer;
612
    /* XXX: flush processor icache at this point if cache flush is
613
       expensive */
614
    tb_flush_count++;
615
}
616

    
617
#ifdef DEBUG_TB_CHECK
618

    
619
static void tb_invalidate_check(target_ulong address)
620
{
621
    TranslationBlock *tb;
622
    int i;
623
    address &= TARGET_PAGE_MASK;
624
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
625
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
626
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
627
                  address >= tb->pc + tb->size)) {
628
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
629
                       address, (long)tb->pc, tb->size);
630
            }
631
        }
632
    }
633
}
634

    
635
/* verify that all the pages have correct rights for code */
636
static void tb_page_check(void)
637
{
638
    TranslationBlock *tb;
639
    int i, flags1, flags2;
640

    
641
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
642
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
643
            flags1 = page_get_flags(tb->pc);
644
            flags2 = page_get_flags(tb->pc + tb->size - 1);
645
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
646
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
647
                       (long)tb->pc, tb->size, flags1, flags2);
648
            }
649
        }
650
    }
651
}
652

    
653
static void tb_jmp_check(TranslationBlock *tb)
654
{
655
    TranslationBlock *tb1;
656
    unsigned int n1;
657

    
658
    /* suppress any remaining jumps to this TB */
659
    tb1 = tb->jmp_first;
660
    for(;;) {
661
        n1 = (long)tb1 & 3;
662
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
663
        if (n1 == 2)
664
            break;
665
        tb1 = tb1->jmp_next[n1];
666
    }
667
    /* check end of list */
668
    if (tb1 != tb) {
669
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
670
    }
671
}
672

    
673
#endif
674

    
675
/* invalidate one TB */
676
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
677
                             int next_offset)
678
{
679
    TranslationBlock *tb1;
680
    for(;;) {
681
        tb1 = *ptb;
682
        if (tb1 == tb) {
683
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
684
            break;
685
        }
686
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
687
    }
688
}
689

    
690
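/* TB list pointers carry extra information in their low two bits: the
   value 0 or 1 selects which of the TB's two pages the link belongs to,
   and 2 marks the end of the circular jump list (see tb->jmp_first). */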
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
691
{
692
    TranslationBlock *tb1;
693
    unsigned int n1;
694

    
695
    for(;;) {
696
        tb1 = *ptb;
697
        n1 = (long)tb1 & 3;
698
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
699
        if (tb1 == tb) {
700
            *ptb = tb1->page_next[n1];
701
            break;
702
        }
703
        ptb = &tb1->page_next[n1];
704
    }
705
}
706

    
707
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
708
{
709
    TranslationBlock *tb1, **ptb;
710
    unsigned int n1;
711

    
712
    ptb = &tb->jmp_next[n];
713
    tb1 = *ptb;
714
    if (tb1) {
715
        /* find tb(n) in circular list */
716
        for(;;) {
717
            tb1 = *ptb;
718
            n1 = (long)tb1 & 3;
719
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
720
            if (n1 == n && tb1 == tb)
721
                break;
722
            if (n1 == 2) {
723
                ptb = &tb1->jmp_first;
724
            } else {
725
                ptb = &tb1->jmp_next[n1];
726
            }
727
        }
728
        /* now we can suppress tb(n) from the list */
729
        *ptb = tb->jmp_next[n];
730

    
731
        tb->jmp_next[n] = NULL;
732
    }
733
}
734

    
735
/* reset the jump entry 'n' of a TB so that it is not chained to
736
   another TB */
737
static inline void tb_reset_jump(TranslationBlock *tb, int n)
738
{
739
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
740
}
741

    
742
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
743
{
744
    CPUState *env;
745
    PageDesc *p;
746
    unsigned int h, n1;
747
    target_phys_addr_t phys_pc;
748
    TranslationBlock *tb1, *tb2;
749

    
750
    /* remove the TB from the hash list */
751
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
752
    h = tb_phys_hash_func(phys_pc);
753
    tb_remove(&tb_phys_hash[h], tb,
754
              offsetof(TranslationBlock, phys_hash_next));
755

    
756
    /* remove the TB from the page list */
757
    if (tb->page_addr[0] != page_addr) {
758
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
759
        tb_page_remove(&p->first_tb, tb);
760
        invalidate_page_bitmap(p);
761
    }
762
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
763
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
764
        tb_page_remove(&p->first_tb, tb);
765
        invalidate_page_bitmap(p);
766
    }
767

    
768
    tb_invalidated_flag = 1;
769

    
770
    /* remove the TB from the per-CPU tb_jmp_cache */
771
    h = tb_jmp_cache_hash_func(tb->pc);
772
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
773
        if (env->tb_jmp_cache[h] == tb)
774
            env->tb_jmp_cache[h] = NULL;
775
    }
776

    
777
    /* suppress this TB from the two jump lists */
778
    tb_jmp_remove(tb, 0);
779
    tb_jmp_remove(tb, 1);
780

    
781
    /* suppress any remaining jumps to this TB */
782
    tb1 = tb->jmp_first;
783
    for(;;) {
784
        n1 = (long)tb1 & 3;
785
        if (n1 == 2)
786
            break;
787
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
788
        tb2 = tb1->jmp_next[n1];
789
        tb_reset_jump(tb1, n1);
790
        tb1->jmp_next[n1] = NULL;
791
        tb1 = tb2;
792
    }
793
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
794

    
795
    tb_phys_invalidate_count++;
796
}
797

    
798
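/* Set bits [start, start + len) in the bit array 'tab' (used for the
   per-page code bitmap, one bit per byte of the page). */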
static inline void set_bits(uint8_t *tab, int start, int len)
799
{
800
    int end, mask, end1;
801

    
802
    end = start + len;
803
    tab += start >> 3;
804
    mask = 0xff << (start & 7);
805
    if ((start & ~7) == (end & ~7)) {
806
        if (start < end) {
807
            mask &= ~(0xff << (end & 7));
808
            *tab |= mask;
809
        }
810
    } else {
811
        *tab++ |= mask;
812
        start = (start + 8) & ~7;
813
        end1 = end & ~7;
814
        while (start < end1) {
815
            *tab++ = 0xff;
816
            start += 8;
817
        }
818
        if (start < end) {
819
            mask = ~(0xff << (end & 7));
820
            *tab |= mask;
821
        }
822
    }
823
}
824

    
825
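/* Build the code bitmap of a page: each set bit marks a byte of the page
   that is covered by at least one TB, so that later writes which miss the
   bitmap can skip the expensive TB invalidation. */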
static void build_page_bitmap(PageDesc *p)
826
{
827
    int n, tb_start, tb_end;
828
    TranslationBlock *tb;
829

    
830
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
831

    
832
    tb = p->first_tb;
833
    while (tb != NULL) {
834
        n = (long)tb & 3;
835
        tb = (TranslationBlock *)((long)tb & ~3);
836
        /* NOTE: this is subtle as a TB may span two physical pages */
837
        if (n == 0) {
838
            /* NOTE: tb_end may be after the end of the page, but
839
               it is not a problem */
840
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
841
            tb_end = tb_start + tb->size;
842
            if (tb_end > TARGET_PAGE_SIZE)
843
                tb_end = TARGET_PAGE_SIZE;
844
        } else {
845
            tb_start = 0;
846
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
847
        }
848
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849
        tb = tb->page_next[n];
850
    }
851
}
852

    
853
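/* Translate a new TB for the given (pc, cs_base, flags) tuple.  If the TB
   cache is full, the whole cache is flushed and tb_invalidated_flag is set
   so that callers drop any cached TB pointers. */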
TranslationBlock *tb_gen_code(CPUState *env,
854
                              target_ulong pc, target_ulong cs_base,
855
                              int flags, int cflags)
856
{
857
    TranslationBlock *tb;
858
    uint8_t *tc_ptr;
859
    target_ulong phys_pc, phys_page2, virt_page2;
860
    int code_gen_size;
861

    
862
    phys_pc = get_phys_addr_code(env, pc);
863
    tb = tb_alloc(pc);
864
    if (!tb) {
865
        /* flush must be done */
866
        tb_flush(env);
867
        /* cannot fail at this point */
868
        tb = tb_alloc(pc);
869
        /* Don't forget to invalidate previous TB info.  */
870
        tb_invalidated_flag = 1;
871
    }
872
    tc_ptr = code_gen_ptr;
873
    tb->tc_ptr = tc_ptr;
874
    tb->cs_base = cs_base;
875
    tb->flags = flags;
876
    tb->cflags = cflags;
877
    cpu_gen_code(env, tb, &code_gen_size);
878
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
879

    
880
    /* check next page if needed */
881
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
882
    phys_page2 = -1;
883
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
884
        phys_page2 = get_phys_addr_code(env, virt_page2);
885
    }
886
    tb_link_phys(tb, phys_pc, phys_page2);
887
    return tb;
888
}
889

    
890
/* invalidate all TBs which intersect with the target physical page
   range [start, end[. NOTE: start and end must refer to the same
   physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
895
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
896
                                   int is_cpu_write_access)
897
{
898
    TranslationBlock *tb, *tb_next, *saved_tb;
899
    CPUState *env = cpu_single_env;
900
    target_ulong tb_start, tb_end;
901
    PageDesc *p;
902
    int n;
903
#ifdef TARGET_HAS_PRECISE_SMC
904
    int current_tb_not_found = is_cpu_write_access;
905
    TranslationBlock *current_tb = NULL;
906
    int current_tb_modified = 0;
907
    target_ulong current_pc = 0;
908
    target_ulong current_cs_base = 0;
909
    int current_flags = 0;
910
#endif /* TARGET_HAS_PRECISE_SMC */
911

    
912
    p = page_find(start >> TARGET_PAGE_BITS);
913
    if (!p)
914
        return;
915
    if (!p->code_bitmap &&
916
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917
        is_cpu_write_access) {
918
        /* build code bitmap */
919
        build_page_bitmap(p);
920
    }
921

    
922
    /* we remove all the TBs in the range [start, end[ */
923
    /* XXX: see if in some cases it could be faster to invalidate all the code */
924
    tb = p->first_tb;
925
    while (tb != NULL) {
926
        n = (long)tb & 3;
927
        tb = (TranslationBlock *)((long)tb & ~3);
928
        tb_next = tb->page_next[n];
929
        /* NOTE: this is subtle as a TB may span two physical pages */
930
        if (n == 0) {
931
            /* NOTE: tb_end may be after the end of the page, but
932
               it is not a problem */
933
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934
            tb_end = tb_start + tb->size;
935
        } else {
936
            tb_start = tb->page_addr[1];
937
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
938
        }
939
        if (!(tb_end <= start || tb_start >= end)) {
940
#ifdef TARGET_HAS_PRECISE_SMC
941
            if (current_tb_not_found) {
942
                current_tb_not_found = 0;
943
                current_tb = NULL;
944
                if (env->mem_io_pc) {
945
                    /* now we have a real cpu fault */
946
                    current_tb = tb_find_pc(env->mem_io_pc);
947
                }
948
            }
949
            if (current_tb == tb &&
950
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
951
                /* If we are modifying the current TB, we must stop
952
                its execution. We could be more precise by checking
953
                that the modification is after the current PC, but it
954
                would require a specialized function to partially
955
                restore the CPU state */
956

    
957
                current_tb_modified = 1;
958
                cpu_restore_state(current_tb, env,
959
                                  env->mem_io_pc, NULL);
960
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961
                                     &current_flags);
962
            }
963
#endif /* TARGET_HAS_PRECISE_SMC */
964
            /* we need to do that to handle the case where a signal
965
               occurs while doing tb_phys_invalidate() */
966
            saved_tb = NULL;
967
            if (env) {
968
                saved_tb = env->current_tb;
969
                env->current_tb = NULL;
970
            }
971
            tb_phys_invalidate(tb, -1);
972
            if (env) {
973
                env->current_tb = saved_tb;
974
                if (env->interrupt_request && env->current_tb)
975
                    cpu_interrupt(env, env->interrupt_request);
976
            }
977
        }
978
        tb = tb_next;
979
    }
980
#if !defined(CONFIG_USER_ONLY)
981
    /* if no code remaining, no need to continue to use slow writes */
982
    if (!p->first_tb) {
983
        invalidate_page_bitmap(p);
984
        if (is_cpu_write_access) {
985
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
986
        }
987
    }
988
#endif
989
#ifdef TARGET_HAS_PRECISE_SMC
990
    if (current_tb_modified) {
991
        /* we generate a block containing just the instruction
992
           modifying the memory. It will ensure that it cannot modify
993
           itself */
994
        env->current_tb = NULL;
995
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
996
        cpu_resume_from_signal(env, NULL);
997
    }
998
#endif
999
}
1000

    
1001
/* len must be <= 8 and start must be a multiple of len */
1002
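/* Fast path for CPU writes that hit a code page: consult the code bitmap
   (if present) and only fall back to tb_invalidate_phys_page_range() when
   the written bytes actually overlap translated code. */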
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1003
{
1004
    PageDesc *p;
1005
    int offset, b;
1006
#if 0
1007
    if (1) {
1008
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1009
                  cpu_single_env->mem_io_vaddr, len,
1010
                  cpu_single_env->eip,
1011
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1012
    }
1013
#endif
1014
    p = page_find(start >> TARGET_PAGE_BITS);
1015
    if (!p)
1016
        return;
1017
    if (p->code_bitmap) {
1018
        offset = start & ~TARGET_PAGE_MASK;
1019
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020
        if (b & ((1 << len) - 1))
1021
            goto do_invalidate;
1022
    } else {
1023
    do_invalidate:
1024
        tb_invalidate_phys_page_range(start, start + len, 1);
1025
    }
1026
}
1027

    
1028
#if !defined(CONFIG_SOFTMMU)
1029
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030
                                    unsigned long pc, void *puc)
1031
{
1032
    TranslationBlock *tb;
1033
    PageDesc *p;
1034
    int n;
1035
#ifdef TARGET_HAS_PRECISE_SMC
1036
    TranslationBlock *current_tb = NULL;
1037
    CPUState *env = cpu_single_env;
1038
    int current_tb_modified = 0;
1039
    target_ulong current_pc = 0;
1040
    target_ulong current_cs_base = 0;
1041
    int current_flags = 0;
1042
#endif
1043

    
1044
    addr &= TARGET_PAGE_MASK;
1045
    p = page_find(addr >> TARGET_PAGE_BITS);
1046
    if (!p)
1047
        return;
1048
    tb = p->first_tb;
1049
#ifdef TARGET_HAS_PRECISE_SMC
1050
    if (tb && pc != 0) {
1051
        current_tb = tb_find_pc(pc);
1052
    }
1053
#endif
1054
    while (tb != NULL) {
1055
        n = (long)tb & 3;
1056
        tb = (TranslationBlock *)((long)tb & ~3);
1057
#ifdef TARGET_HAS_PRECISE_SMC
1058
        if (current_tb == tb &&
1059
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060
                /* If we are modifying the current TB, we must stop
1061
                   its execution. We could be more precise by checking
1062
                   that the modification is after the current PC, but it
1063
                   would require a specialized function to partially
1064
                   restore the CPU state */
1065

    
1066
            current_tb_modified = 1;
1067
            cpu_restore_state(current_tb, env, pc, puc);
1068
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1069
                                 &current_flags);
1070
        }
1071
#endif /* TARGET_HAS_PRECISE_SMC */
1072
        tb_phys_invalidate(tb, addr);
1073
        tb = tb->page_next[n];
1074
    }
1075
    p->first_tb = NULL;
1076
#ifdef TARGET_HAS_PRECISE_SMC
1077
    if (current_tb_modified) {
1078
        /* we generate a block containing just the instruction
1079
           modifying the memory. It will ensure that it cannot modify
1080
           itself */
1081
        env->current_tb = NULL;
1082
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083
        cpu_resume_from_signal(env, puc);
1084
    }
1085
#endif
1086
}
1087
#endif
1088

    
1089
/* add the tb to the target page and protect it if necessary */
1090
static inline void tb_alloc_page(TranslationBlock *tb,
1091
                                 unsigned int n, target_ulong page_addr)
1092
{
1093
    PageDesc *p;
1094
    TranslationBlock *last_first_tb;
1095

    
1096
    tb->page_addr[n] = page_addr;
1097
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098
    tb->page_next[n] = p->first_tb;
1099
    last_first_tb = p->first_tb;
1100
    p->first_tb = (TranslationBlock *)((long)tb | n);
1101
    invalidate_page_bitmap(p);
1102

    
1103
#if defined(TARGET_HAS_SMC) || 1
1104

    
1105
#if defined(CONFIG_USER_ONLY)
1106
    if (p->flags & PAGE_WRITE) {
1107
        target_ulong addr;
1108
        PageDesc *p2;
1109
        int prot;
1110

    
1111
        /* force the host page to be non-writable (writes will incur a
           page fault + mprotect overhead) */
1113
        page_addr &= qemu_host_page_mask;
1114
        prot = 0;
1115
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116
            addr += TARGET_PAGE_SIZE) {
1117

    
1118
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1119
            if (!p2)
1120
                continue;
1121
            prot |= p2->flags;
1122
            p2->flags &= ~PAGE_WRITE;
1123
            page_get_flags(addr);
1124
        }
1125
        mprotect(g2h(page_addr), qemu_host_page_size,
1126
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127
#ifdef DEBUG_TB_INVALIDATE
1128
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1129
               page_addr);
1130
#endif
1131
    }
1132
#else
1133
    /* if some code is already present, then the pages are already
1134
       protected. So we handle the case where only the first TB is
1135
       allocated in a physical page */
1136
    if (!last_first_tb) {
1137
        tlb_protect_code(page_addr);
1138
    }
1139
#endif
1140

    
1141
#endif /* TARGET_HAS_SMC */
1142
}
1143

    
1144
/* Allocate a new translation block. Flush the translation buffer if
1145
   too many translation blocks or too much generated code. */
1146
TranslationBlock *tb_alloc(target_ulong pc)
1147
{
1148
    TranslationBlock *tb;
1149

    
1150
    if (nb_tbs >= code_gen_max_blocks ||
1151
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1152
        return NULL;
1153
    tb = &tbs[nb_tbs++];
1154
    tb->pc = pc;
1155
    tb->cflags = 0;
1156
    return tb;
1157
}
1158

    
1159
void tb_free(TranslationBlock *tb)
1160
{
1161
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
1164
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165
        code_gen_ptr = tb->tc_ptr;
1166
        nb_tbs--;
1167
    }
1168
}
1169

    
1170
/* add a new TB and link it to the physical page tables. phys_page2 is
1171
   (-1) to indicate that only one page contains the TB. */
1172
void tb_link_phys(TranslationBlock *tb,
1173
                  target_ulong phys_pc, target_ulong phys_page2)
1174
{
1175
    unsigned int h;
1176
    TranslationBlock **ptb;
1177

    
1178
    /* Grab the mmap lock to stop another thread invalidating this TB
1179
       before we are done.  */
1180
    mmap_lock();
1181
    /* add in the physical hash table */
1182
    h = tb_phys_hash_func(phys_pc);
1183
    ptb = &tb_phys_hash[h];
1184
    tb->phys_hash_next = *ptb;
1185
    *ptb = tb;
1186

    
1187
    /* add in the page list */
1188
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189
    if (phys_page2 != -1)
1190
        tb_alloc_page(tb, 1, phys_page2);
1191
    else
1192
        tb->page_addr[1] = -1;
1193

    
1194
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195
    tb->jmp_next[0] = NULL;
1196
    tb->jmp_next[1] = NULL;
1197

    
1198
    /* init original jump addresses */
1199
    if (tb->tb_next_offset[0] != 0xffff)
1200
        tb_reset_jump(tb, 0);
1201
    if (tb->tb_next_offset[1] != 0xffff)
1202
        tb_reset_jump(tb, 1);
1203

    
1204
#ifdef DEBUG_TB_CHECK
1205
    tb_page_check();
1206
#endif
1207
    mmap_unlock();
1208
}
1209

    
1210
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211
   tb[1].tc_ptr. Return NULL if not found */
1212
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1213
{
1214
    int m_min, m_max, m;
1215
    unsigned long v;
1216
    TranslationBlock *tb;
1217

    
1218
    if (nb_tbs <= 0)
1219
        return NULL;
1220
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1221
        tc_ptr >= (unsigned long)code_gen_ptr)
1222
        return NULL;
1223
    /* binary search (cf Knuth) */
1224
    m_min = 0;
1225
    m_max = nb_tbs - 1;
1226
    while (m_min <= m_max) {
1227
        m = (m_min + m_max) >> 1;
1228
        tb = &tbs[m];
1229
        v = (unsigned long)tb->tc_ptr;
1230
        if (v == tc_ptr)
1231
            return tb;
1232
        else if (tc_ptr < v) {
1233
            m_max = m - 1;
1234
        } else {
1235
            m_min = m + 1;
1236
        }
1237
    }
1238
    return &tbs[m_max];
1239
}
1240

    
1241
static void tb_reset_jump_recursive(TranslationBlock *tb);
1242

    
1243
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1244
{
1245
    TranslationBlock *tb1, *tb_next, **ptb;
1246
    unsigned int n1;
1247

    
1248
    tb1 = tb->jmp_next[n];
1249
    if (tb1 != NULL) {
1250
        /* find head of list */
1251
        for(;;) {
1252
            n1 = (long)tb1 & 3;
1253
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1254
            if (n1 == 2)
1255
                break;
1256
            tb1 = tb1->jmp_next[n1];
1257
        }
1258
        /* we are now sure that tb jumps to tb1 */
1259
        tb_next = tb1;
1260

    
1261
        /* remove tb from the jmp_first list */
1262
        ptb = &tb_next->jmp_first;
1263
        for(;;) {
1264
            tb1 = *ptb;
1265
            n1 = (long)tb1 & 3;
1266
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267
            if (n1 == n && tb1 == tb)
1268
                break;
1269
            ptb = &tb1->jmp_next[n1];
1270
        }
1271
        *ptb = tb->jmp_next[n];
1272
        tb->jmp_next[n] = NULL;
1273

    
1274
        /* suppress the jump to next tb in generated code */
1275
        tb_reset_jump(tb, n);
1276

    
1277
        /* suppress jumps in the tb on which we could have jumped */
1278
        tb_reset_jump_recursive(tb_next);
1279
    }
1280
}
1281

    
1282
static void tb_reset_jump_recursive(TranslationBlock *tb)
1283
{
1284
    tb_reset_jump_recursive2(tb, 0);
1285
    tb_reset_jump_recursive2(tb, 1);
1286
}
1287

    
1288
#if defined(TARGET_HAS_ICE)
1289
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1290
{
1291
    target_phys_addr_t addr;
1292
    target_ulong pd;
1293
    ram_addr_t ram_addr;
1294
    PhysPageDesc *p;
1295

    
1296
    addr = cpu_get_phys_page_debug(env, pc);
1297
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1298
    if (!p) {
1299
        pd = IO_MEM_UNASSIGNED;
1300
    } else {
1301
        pd = p->phys_offset;
1302
    }
1303
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1305
}
1306
#endif
1307

    
1308
/* Add a watchpoint.  */
1309
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310
                          int flags, CPUWatchpoint **watchpoint)
1311
{
1312
    target_ulong len_mask = ~(len - 1);
1313
    CPUWatchpoint *wp;
1314

    
1315
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1319
        return -EINVAL;
1320
    }
1321
    wp = qemu_malloc(sizeof(*wp));
1322

    
1323
    wp->vaddr = addr;
1324
    wp->len_mask = len_mask;
1325
    wp->flags = flags;
1326

    
1327
    /* keep all GDB-injected watchpoints in front */
1328
    if (flags & BP_GDB)
1329
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1330
    else
1331
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1332

    
1333
    tlb_flush_page(env, addr);
1334

    
1335
    if (watchpoint)
1336
        *watchpoint = wp;
1337
    return 0;
1338
}
1339

    
1340
/* Remove a specific watchpoint.  */
1341
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1342
                          int flags)
1343
{
1344
    target_ulong len_mask = ~(len - 1);
1345
    CPUWatchpoint *wp;
1346

    
1347
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1348
        if (addr == wp->vaddr && len_mask == wp->len_mask
1349
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1350
            cpu_watchpoint_remove_by_ref(env, wp);
1351
            return 0;
1352
        }
1353
    }
1354
    return -ENOENT;
1355
}
1356

    
1357
/* Remove a specific watchpoint by reference.  */
1358
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1359
{
1360
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1361

    
1362
    tlb_flush_page(env, watchpoint->vaddr);
1363

    
1364
    qemu_free(watchpoint);
1365
}
1366

    
1367
/* Remove all matching watchpoints.  */
1368
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1369
{
1370
    CPUWatchpoint *wp, *next;
1371

    
1372
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1373
        if (wp->flags & mask)
1374
            cpu_watchpoint_remove_by_ref(env, wp);
1375
    }
1376
}
1377

    
1378
/* Add a breakpoint.  */
1379
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1380
                          CPUBreakpoint **breakpoint)
1381
{
1382
#if defined(TARGET_HAS_ICE)
1383
    CPUBreakpoint *bp;
1384

    
1385
    bp = qemu_malloc(sizeof(*bp));
1386

    
1387
    bp->pc = pc;
1388
    bp->flags = flags;
1389

    
1390
    /* keep all GDB-injected breakpoints in front */
1391
    if (flags & BP_GDB)
1392
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1393
    else
1394
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1395

    
1396
    breakpoint_invalidate(env, pc);
1397

    
1398
    if (breakpoint)
1399
        *breakpoint = bp;
1400
    return 0;
1401
#else
1402
    return -ENOSYS;
1403
#endif
1404
}
1405

    
1406
/* Remove a specific breakpoint.  */
1407
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1408
{
1409
#if defined(TARGET_HAS_ICE)
1410
    CPUBreakpoint *bp;
1411

    
1412
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1413
        if (bp->pc == pc && bp->flags == flags) {
1414
            cpu_breakpoint_remove_by_ref(env, bp);
1415
            return 0;
1416
        }
1417
    }
1418
    return -ENOENT;
1419
#else
1420
    return -ENOSYS;
1421
#endif
1422
}
1423

    
1424
/* Remove a specific breakpoint by reference.  */
1425
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1426
{
1427
#if defined(TARGET_HAS_ICE)
1428
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1429

    
1430
    breakpoint_invalidate(env, breakpoint->pc);
1431

    
1432
    qemu_free(breakpoint);
1433
#endif
1434
}
1435

    
1436
/* Remove all matching breakpoints. */
1437
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1438
{
1439
#if defined(TARGET_HAS_ICE)
1440
    CPUBreakpoint *bp, *next;
1441

    
1442
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1443
        if (bp->flags & mask)
1444
            cpu_breakpoint_remove_by_ref(env, bp);
1445
    }
1446
#endif
1447
}
1448

    
1449
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1450
   CPU loop after each instruction */
1451
void cpu_single_step(CPUState *env, int enabled)
1452
{
1453
#if defined(TARGET_HAS_ICE)
1454
    if (env->singlestep_enabled != enabled) {
1455
        env->singlestep_enabled = enabled;
1456
        if (kvm_enabled())
1457
            kvm_update_guest_debug(env, 0);
1458
        else {
1459
            /* must flush all the translated code to avoid inconsistencies */
1460
            /* XXX: only flush what is necessary */
1461
            tb_flush(env);
1462
        }
1463
    }
1464
#endif
1465
}
1466

    
1467
/* enable or disable low-level logging */
1468
void cpu_set_log(int log_flags)
1469
{
1470
    loglevel = log_flags;
1471
    if (loglevel && !logfile) {
1472
        logfile = fopen(logfilename, log_append ? "a" : "w");
1473
        if (!logfile) {
1474
            perror(logfilename);
1475
            _exit(1);
1476
        }
1477
#if !defined(CONFIG_SOFTMMU)
1478
        /* must avoid glibc using mmap() for the stdio buffer, so set one "by hand" */
1479
        {
1480
            static char logfile_buf[4096];
1481
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1482
        }
1483
#else
1484
        setvbuf(logfile, NULL, _IOLBF, 0);
1485
#endif
1486
        log_append = 1;
1487
    }
1488
    if (!loglevel && logfile) {
1489
        fclose(logfile);
1490
        logfile = NULL;
1491
    }
1492
}
1493

    
1494
void cpu_set_log_filename(const char *filename)
1495
{
1496
    logfilename = strdup(filename);
1497
    if (logfile) {
1498
        fclose(logfile);
1499
        logfile = NULL;
1500
    }
1501
    cpu_set_log(loglevel);
1502
}
1503

    
1504
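/* Force the CPU out of the TB it is currently executing by resetting that
   TB's outgoing jump chains so that control returns to the main loop
   (a no-op under USE_NPTL, see the FIXME below). */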
static void cpu_unlink_tb(CPUState *env)
1505
{
1506
#if defined(USE_NPTL)
1507
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1508
       problem and hope the cpu will stop of its own accord.  For userspace
1509
       emulation this often isn't actually as bad as it sounds.  Often
1510
       signals are used primarily to interrupt blocking syscalls.  */
1511
#else
1512
    TranslationBlock *tb;
1513
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1514

    
1515
    tb = env->current_tb;
1516
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
1518
    if (tb && !testandset(&interrupt_lock)) {
1519
        env->current_tb = NULL;
1520
        tb_reset_jump_recursive(tb);
1521
        resetlock(&interrupt_lock);
1522
    }
1523
#endif
1524
}
1525

    
1526
/* mask must never be zero, except for A20 change call */
1527
void cpu_interrupt(CPUState *env, int mask)
1528
{
1529
    int old_mask;
1530

    
1531
    old_mask = env->interrupt_request;
1532
    env->interrupt_request |= mask;
1533

    
1534
    if (use_icount) {
1535
        env->icount_decr.u16.high = 0xffff;
1536
#ifndef CONFIG_USER_ONLY
1537
        if (!can_do_io(env)
1538
            && (mask & ~old_mask) != 0) {
1539
            cpu_abort(env, "Raised interrupt while not in I/O function");
1540
        }
1541
#endif
1542
    } else {
1543
        cpu_unlink_tb(env);
1544
    }
1545
}
1546

    
1547
void cpu_reset_interrupt(CPUState *env, int mask)
1548
{
1549
    env->interrupt_request &= ~mask;
1550
}
1551

    
1552
void cpu_exit(CPUState *env)
1553
{
1554
    env->exit_request = 1;
1555
    cpu_unlink_tb(env);
1556
}
1557

    
1558
const CPULogItem cpu_log_items[] = {
1559
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1560
      "show generated host assembly code for each compiled TB" },
1561
    { CPU_LOG_TB_IN_ASM, "in_asm",
1562
      "show target assembly code for each compiled TB" },
1563
    { CPU_LOG_TB_OP, "op",
1564
      "show micro ops for each compiled TB" },
1565
    { CPU_LOG_TB_OP_OPT, "op_opt",
1566
      "show micro ops "
1567
#ifdef TARGET_I386
1568
      "before eflags optimization and "
1569
#endif
1570
      "after liveness analysis" },
1571
    { CPU_LOG_INT, "int",
1572
      "show interrupts/exceptions in short format" },
1573
    { CPU_LOG_EXEC, "exec",
1574
      "show trace before each executed TB (lots of logs)" },
1575
    { CPU_LOG_TB_CPU, "cpu",
1576
      "show CPU state before block translation" },
1577
#ifdef TARGET_I386
1578
    { CPU_LOG_PCALL, "pcall",
1579
      "show protected mode far calls/returns/exceptions" },
1580
    { CPU_LOG_RESET, "cpu_reset",
1581
      "show CPU state before CPU resets" },
1582
#endif
1583
#ifdef DEBUG_IOPORT
1584
    { CPU_LOG_IOPORT, "ioport",
1585
      "show all i/o ports accesses" },
1586
#endif
1587
    { 0, NULL, NULL },
1588
};
1589

    
1590
static int cmp1(const char *s1, int n, const char *s2)
1591
{
1592
    if (strlen(s2) != n)
1593
        return 0;
1594
    return memcmp(s1, s2, n) == 0;
1595
}
1596

    
1597
/* takes a comma-separated list of log masks. Returns 0 on error. */
1598
int cpu_str_to_log_mask(const char *str)
1599
{
1600
    const CPULogItem *item;
1601
    int mask;
1602
    const char *p, *p1;
1603

    
1604
    p = str;
1605
    mask = 0;
1606
    for(;;) {
1607
        p1 = strchr(p, ',');
1608
        if (!p1)
1609
            p1 = p + strlen(p);
1610
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
1621
    found:
1622
        mask |= item->mask;
1623
        if (*p1 != ',')
1624
            break;
1625
        p = p1 + 1;
1626
    }
1627
    return mask;
1628
}
1629

    
1630
void cpu_abort(CPUState *env, const char *fmt, ...)
1631
{
1632
    va_list ap;
1633
    va_list ap2;
1634

    
1635
    va_start(ap, fmt);
1636
    va_copy(ap2, ap);
1637
    fprintf(stderr, "qemu: fatal: ");
1638
    vfprintf(stderr, fmt, ap);
1639
    fprintf(stderr, "\n");
1640
#ifdef TARGET_I386
1641
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1642
#else
1643
    cpu_dump_state(env, stderr, fprintf, 0);
1644
#endif
1645
    if (qemu_log_enabled()) {
1646
        qemu_log("qemu: fatal: ");
1647
        qemu_log_vprintf(fmt, ap2);
1648
        qemu_log("\n");
1649
#ifdef TARGET_I386
1650
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1651
#else
1652
        log_cpu_state(env, 0);
1653
#endif
1654
        qemu_log_flush();
1655
        qemu_log_close();
1656
    }
1657
    va_end(ap2);
1658
    va_end(ap);
1659
    abort();
1660
}
1661

    
1662
CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&new_env->breakpoints);
    TAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
1714
   implemented yet) */
1715
void tlb_flush(CPUState *env, int flush_global)
1716
{
1717
    int i;
1718

    
1719
#if defined(DEBUG_TLB)
1720
    printf("tlb_flush:\n");
1721
#endif
1722
    /* must reset current TB so that interrupts cannot modify the
1723
       links while we are modifying them */
1724
    env->current_tb = NULL;
1725

    
1726
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1727
        env->tlb_table[0][i].addr_read = -1;
1728
        env->tlb_table[0][i].addr_write = -1;
1729
        env->tlb_table[0][i].addr_code = -1;
1730
        env->tlb_table[1][i].addr_read = -1;
1731
        env->tlb_table[1][i].addr_write = -1;
1732
        env->tlb_table[1][i].addr_code = -1;
1733
#if (NB_MMU_MODES >= 3)
1734
        env->tlb_table[2][i].addr_read = -1;
1735
        env->tlb_table[2][i].addr_write = -1;
1736
        env->tlb_table[2][i].addr_code = -1;
1737
#endif
1738
#if (NB_MMU_MODES >= 4)
1739
        env->tlb_table[3][i].addr_read = -1;
1740
        env->tlb_table[3][i].addr_write = -1;
1741
        env->tlb_table[3][i].addr_code = -1;
1742
#endif
1743
#if (NB_MMU_MODES >= 5)
1744
        env->tlb_table[4][i].addr_read = -1;
1745
        env->tlb_table[4][i].addr_write = -1;
1746
        env->tlb_table[4][i].addr_code = -1;
1747
#endif
1748

    
1749
    }
1750

    
1751
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1752

    
1753
#ifdef USE_KQEMU
1754
    if (env->kqemu_enabled) {
1755
        kqemu_flush(env, flush_global);
1756
    }
1757
#endif
1758
    tlb_flush_count++;
1759
}
1760

    
1761
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1762
{
1763
    if (addr == (tlb_entry->addr_read &
1764
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1765
        addr == (tlb_entry->addr_write &
1766
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1767
        addr == (tlb_entry->addr_code &
1768
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1769
        tlb_entry->addr_read = -1;
1770
        tlb_entry->addr_write = -1;
1771
        tlb_entry->addr_code = -1;
1772
    }
1773
}
1774

    
1775
void tlb_flush_page(CPUState *env, target_ulong addr)
1776
{
1777
    int i;
1778

    
1779
#if defined(DEBUG_TLB)
1780
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1781
#endif
1782
    /* must reset current TB so that interrupts cannot modify the
1783
       links while we are modifying them */
1784
    env->current_tb = NULL;
1785

    
1786
    addr &= TARGET_PAGE_MASK;
1787
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1788
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1789
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1790
#if (NB_MMU_MODES >= 3)
1791
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1792
#endif
1793
#if (NB_MMU_MODES >= 4)
1794
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1795
#endif
1796
#if (NB_MMU_MODES >= 5)
1797
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1798
#endif
1799

    
1800
    tlb_flush_jmp_cache(env, addr);
1801

    
1802
#ifdef USE_KQEMU
1803
    if (env->kqemu_enabled) {
1804
        kqemu_flush_page(env, addr);
1805
    }
1806
#endif
1807
}
1808

    
1809
/* update the TLBs so that writes to code in the virtual page 'addr'
1810
   can be detected */
1811
static void tlb_protect_code(ram_addr_t ram_addr)
1812
{
1813
    cpu_physical_memory_reset_dirty(ram_addr,
1814
                                    ram_addr + TARGET_PAGE_SIZE,
1815
                                    CODE_DIRTY_FLAG);
1816
}
1817

    
1818
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1819
   tested for self modifying code */
1820
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1821
                                    target_ulong vaddr)
1822
{
1823
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1824
}
1825

    
1826
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1827
                                         unsigned long start, unsigned long length)
1828
{
1829
    unsigned long addr;
1830
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1832
        if ((addr - start) < length) {
1833
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1834
        }
1835
    }
1836
}
1837

    
1838
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1839
                                     int dirty_flags)
1840
{
1841
    CPUState *env;
1842
    unsigned long length, start1;
1843
    int i, mask, len;
1844
    uint8_t *p;
1845

    
1846
    start &= TARGET_PAGE_MASK;
1847
    end = TARGET_PAGE_ALIGN(end);
1848

    
1849
    length = end - start;
1850
    if (length == 0)
1851
        return;
1852
    len = length >> TARGET_PAGE_BITS;
1853
#ifdef USE_KQEMU
1854
    /* XXX: should not depend on cpu context */
1855
    env = first_cpu;
1856
    if (env->kqemu_enabled) {
1857
        ram_addr_t addr;
1858
        addr = start;
1859
        for(i = 0; i < len; i++) {
1860
            kqemu_set_notdirty(env, addr);
1861
            addr += TARGET_PAGE_SIZE;
1862
        }
1863
    }
1864
#endif
1865
    mask = ~dirty_flags;
1866
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1867
    for(i = 0; i < len; i++)
1868
        p[i] &= mask;
1869

    
1870
    /* we modify the TLB cache so that the dirty bit will be set again
1871
       when accessing the range */
1872
    start1 = start + (unsigned long)phys_ram_base;
1873
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1874
        for(i = 0; i < CPU_TLB_SIZE; i++)
1875
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1876
        for(i = 0; i < CPU_TLB_SIZE; i++)
1877
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1878
#if (NB_MMU_MODES >= 3)
1879
        for(i = 0; i < CPU_TLB_SIZE; i++)
1880
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1881
#endif
1882
#if (NB_MMU_MODES >= 4)
1883
        for(i = 0; i < CPU_TLB_SIZE; i++)
1884
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1885
#endif
1886
#if (NB_MMU_MODES >= 5)
1887
        for(i = 0; i < CPU_TLB_SIZE; i++)
1888
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1889
#endif
1890
    }
1891
}
1892

    
1893
int cpu_physical_memory_set_dirty_tracking(int enable)
1894
{
1895
    in_migration = enable;
1896
    return 0;
1897
}
1898

    
1899
int cpu_physical_memory_get_dirty_tracking(void)
1900
{
1901
    return in_migration;
1902
}
1903

    
1904
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1905
{
1906
    if (kvm_enabled())
1907
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1908
}
1909

    
1910
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1911
{
1912
    ram_addr_t ram_addr;
1913

    
1914
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1915
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1916
            tlb_entry->addend - (unsigned long)phys_ram_base;
1917
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1918
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1919
        }
1920
    }
1921
}
1922

    
1923
/* update the TLB according to the current state of the dirty bits */
1924
void cpu_tlb_update_dirty(CPUState *env)
1925
{
1926
    int i;
1927
    for(i = 0; i < CPU_TLB_SIZE; i++)
1928
        tlb_update_dirty(&env->tlb_table[0][i]);
1929
    for(i = 0; i < CPU_TLB_SIZE; i++)
1930
        tlb_update_dirty(&env->tlb_table[1][i]);
1931
#if (NB_MMU_MODES >= 3)
1932
    for(i = 0; i < CPU_TLB_SIZE; i++)
1933
        tlb_update_dirty(&env->tlb_table[2][i]);
1934
#endif
1935
#if (NB_MMU_MODES >= 4)
1936
    for(i = 0; i < CPU_TLB_SIZE; i++)
1937
        tlb_update_dirty(&env->tlb_table[3][i]);
1938
#endif
1939
#if (NB_MMU_MODES >= 5)
1940
    for(i = 0; i < CPU_TLB_SIZE; i++)
1941
        tlb_update_dirty(&env->tlb_table[4][i]);
1942
#endif
1943
}
1944

    
1945
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1946
{
1947
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1948
        tlb_entry->addr_write = vaddr;
1949
}
1950

    
1951
/* update the TLB corresponding to virtual page vaddr
1952
   so that it is no longer dirty */
1953
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1954
{
1955
    int i;
1956

    
1957
    vaddr &= TARGET_PAGE_MASK;
1958
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1959
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1960
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1961
#if (NB_MMU_MODES >= 3)
1962
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1963
#endif
1964
#if (NB_MMU_MODES >= 4)
1965
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1966
#endif
1967
#if (NB_MMU_MODES >= 5)
1968
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1969
#endif
1970
}
1971

    
1972
/* add a new TLB entry. At most one entry for a given virtual address
1973
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1974
   (can only happen in non SOFTMMU mode for I/O pages or pages
1975
   conflicting with the host address space). */
1976
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1977
                      target_phys_addr_t paddr, int prot,
1978
                      int mmu_idx, int is_softmmu)
1979
{
1980
    PhysPageDesc *p;
1981
    unsigned long pd;
1982
    unsigned int index;
1983
    target_ulong address;
1984
    target_ulong code_address;
1985
    target_phys_addr_t addend;
1986
    int ret;
1987
    CPUTLBEntry *te;
1988
    CPUWatchpoint *wp;
1989
    target_phys_addr_t iotlb;
1990

    
1991
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1992
    if (!p) {
1993
        pd = IO_MEM_UNASSIGNED;
1994
    } else {
1995
        pd = p->phys_offset;
1996
    }
1997
#if defined(DEBUG_TLB)
1998
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1999
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2000
#endif
2001

    
2002
    ret = 0;
2003
    address = vaddr;
2004
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2005
        /* IO memory case (romd handled later) */
2006
        address |= TLB_MMIO;
2007
    }
2008
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
2009
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2010
        /* Normal RAM.  */
2011
        iotlb = pd & TARGET_PAGE_MASK;
2012
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2013
            iotlb |= IO_MEM_NOTDIRTY;
2014
        else
2015
            iotlb |= IO_MEM_ROM;
2016
    } else {
2017
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
2019
           of that region.  This would avoid having to special case RAM,
2020
           and avoid full address decoding in every device.
2021
           We can't use the high bits of pd for this because
2022
           IO_MEM_ROMD uses these as a ram address.  */
2023
        iotlb = (pd & ~TARGET_PAGE_MASK);
2024
        if (p) {
2025
            iotlb += p->region_offset;
2026
        } else {
2027
            iotlb += paddr;
2028
        }
2029
    }
2030

    
2031
    code_address = address;
2032
    /* Make accesses to pages with watchpoints go via the
2033
       watchpoint trap routines.  */
2034
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2035
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2036
            iotlb = io_mem_watch + paddr;
2037
            /* TODO: The memory case can be optimized by not trapping
2038
               reads of pages with a write breakpoint.  */
2039
            address |= TLB_MMIO;
2040
        }
2041
    }
2042

    
2043
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2044
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2045
    te = &env->tlb_table[mmu_idx][index];
2046
    te->addend = addend - vaddr;
2047
    if (prot & PAGE_READ) {
2048
        te->addr_read = address;
2049
    } else {
2050
        te->addr_read = -1;
2051
    }
2052

    
2053
    if (prot & PAGE_EXEC) {
2054
        te->addr_code = code_address;
2055
    } else {
2056
        te->addr_code = -1;
2057
    }
2058
    if (prot & PAGE_WRITE) {
2059
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2060
            (pd & IO_MEM_ROMD)) {
2061
            /* Write access calls the I/O callback.  */
2062
            te->addr_write = address | TLB_MMIO;
2063
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2064
                   !cpu_physical_memory_is_dirty(pd)) {
2065
            te->addr_write = address | TLB_NOTDIRTY;
2066
        } else {
2067
            te->addr_write = address;
2068
        }
2069
    } else {
2070
        te->addr_write = -1;
2071
    }
2072
    return ret;
2073
}
2074
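/* Illustrative sketch (not part of the original source): the shape of a
   target MMU fault handler that installs a mapping with tlb_set_page_exec()
   above once the guest virtual address has been translated.  This assumes a
   target without an MMU, so virtual and physical addresses are identical
   (is_write is ignored because every page is mapped writable); the function
   name example_handle_mmu_fault() is hypothetical. */
static int example_handle_mmu_fault(CPUState *env1, target_ulong address,
                                    int is_write, int mmu_idx)
{
    /* every page is readable, writable and executable in this sketch */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    target_phys_addr_t paddr = address & TARGET_PAGE_MASK;

    return tlb_set_page_exec(env1, address & TARGET_PAGE_MASK, paddr,
                             prot, mmu_idx, 1);
}
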

    
2075
#else
2076

    
2077
void tlb_flush(CPUState *env, int flush_global)
2078
{
2079
}
2080

    
2081
void tlb_flush_page(CPUState *env, target_ulong addr)
2082
{
2083
}
2084

    
2085
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2086
                      target_phys_addr_t paddr, int prot,
2087
                      int mmu_idx, int is_softmmu)
2088
{
2089
    return 0;
2090
}
2091

    
2092
/* dump memory mappings */
2093
void page_dump(FILE *f)
2094
{
2095
    unsigned long start, end;
2096
    int i, j, prot, prot1;
2097
    PageDesc *p;
2098

    
2099
    fprintf(f, "%-8s %-8s %-8s %s\n",
2100
            "start", "end", "size", "prot");
2101
    start = -1;
2102
    end = -1;
2103
    prot = 0;
2104
    for(i = 0; i <= L1_SIZE; i++) {
2105
        if (i < L1_SIZE)
2106
            p = l1_map[i];
2107
        else
2108
            p = NULL;
2109
        for(j = 0;j < L2_SIZE; j++) {
2110
            if (!p)
2111
                prot1 = 0;
2112
            else
2113
                prot1 = p[j].flags;
2114
            if (prot1 != prot) {
2115
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2116
                if (start != -1) {
2117
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2118
                            start, end, end - start,
2119
                            prot & PAGE_READ ? 'r' : '-',
2120
                            prot & PAGE_WRITE ? 'w' : '-',
2121
                            prot & PAGE_EXEC ? 'x' : '-');
2122
                }
2123
                if (prot1 != 0)
2124
                    start = end;
2125
                else
2126
                    start = -1;
2127
                prot = prot1;
2128
            }
2129
            if (!p)
2130
                break;
2131
        }
2132
    }
2133
}
2134

    
2135
int page_get_flags(target_ulong address)
2136
{
2137
    PageDesc *p;
2138

    
2139
    p = page_find(address >> TARGET_PAGE_BITS);
2140
    if (!p)
2141
        return 0;
2142
    return p->flags;
2143
}
2144

    
2145
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
2148
void page_set_flags(target_ulong start, target_ulong end, int flags)
2149
{
2150
    PageDesc *p;
2151
    target_ulong addr;
2152

    
2153
    /* mmap_lock should already be held.  */
2154
    start = start & TARGET_PAGE_MASK;
2155
    end = TARGET_PAGE_ALIGN(end);
2156
    if (flags & PAGE_WRITE)
2157
        flags |= PAGE_WRITE_ORG;
2158
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2159
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2160
        /* We may be called for host regions that are outside guest
2161
           address space.  */
2162
        if (!p)
2163
            return;
2164
        /* if the write protection is set, then we invalidate the code
2165
           inside */
2166
        if (!(p->flags & PAGE_WRITE) &&
2167
            (flags & PAGE_WRITE) &&
2168
            p->first_tb) {
2169
            tb_invalidate_phys_page(addr, 0, NULL);
2170
        }
2171
        p->flags = flags;
2172
    }
2173
}
2174

    
2175
int page_check_range(target_ulong start, target_ulong len, int flags)
2176
{
2177
    PageDesc *p;
2178
    target_ulong end;
2179
    target_ulong addr;
2180

    
2181
    if (start + len < start)
2182
        /* we've wrapped around */
2183
        return -1;
2184

    
2185
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2186
    start = start & TARGET_PAGE_MASK;
2187

    
2188
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2189
        p = page_find(addr >> TARGET_PAGE_BITS);
2190
        if( !p )
2191
            return -1;
2192
        if( !(p->flags & PAGE_VALID) )
2193
            return -1;
2194

    
2195
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2196
            return -1;
2197
        if (flags & PAGE_WRITE) {
2198
            if (!(p->flags & PAGE_WRITE_ORG))
2199
                return -1;
2200
            /* unprotect the page if it was put read-only because it
2201
               contains translated code */
2202
            if (!(p->flags & PAGE_WRITE)) {
2203
                if (!page_unprotect(addr, 0, NULL))
2204
                    return -1;
2205
            }
2206
            return 0;
2207
        }
2208
    }
2209
    return 0;
2210
}
2211
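/* Illustrative sketch (not part of the original source): how user mode
   emulation code (e.g. a syscall helper) could validate a guest buffer with
   page_check_range() above before touching it.  The function name
   example_access_ok() is hypothetical. */
static int example_access_ok(target_ulong guest_addr, target_ulong size,
                             int is_write)
{
    int flags = PAGE_READ | (is_write ? PAGE_WRITE : 0);

    /* page_check_range() returns 0 when every page in the range is valid
       for the requested access, unprotecting translated-code pages if a
       write is requested */
    return page_check_range(guest_addr, size, flags) == 0;
}
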

    
2212
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2214
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2215
{
2216
    unsigned int page_index, prot, pindex;
2217
    PageDesc *p, *p1;
2218
    target_ulong host_start, host_end, addr;
2219

    
2220
    /* Technically this isn't safe inside a signal handler.  However we
2221
       know this only ever happens in a synchronous SEGV handler, so in
2222
       practice it seems to be ok.  */
2223
    mmap_lock();
2224

    
2225
    host_start = address & qemu_host_page_mask;
2226
    page_index = host_start >> TARGET_PAGE_BITS;
2227
    p1 = page_find(page_index);
2228
    if (!p1) {
2229
        mmap_unlock();
2230
        return 0;
2231
    }
2232
    host_end = host_start + qemu_host_page_size;
2233
    p = p1;
2234
    prot = 0;
2235
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2236
        prot |= p->flags;
2237
        p++;
2238
    }
2239
    /* if the page was really writable, then we change its
2240
       protection back to writable */
2241
    if (prot & PAGE_WRITE_ORG) {
2242
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2243
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2244
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2245
                     (prot & PAGE_BITS) | PAGE_WRITE);
2246
            p1[pindex].flags |= PAGE_WRITE;
2247
            /* and since the content will be modified, we must invalidate
2248
               the corresponding translated code. */
2249
            tb_invalidate_phys_page(address, pc, puc);
2250
#ifdef DEBUG_TB_CHECK
2251
            tb_invalidate_check(address);
2252
#endif
2253
            mmap_unlock();
2254
            return 1;
2255
        }
2256
    }
2257
    mmap_unlock();
2258
    return 0;
2259
}
2260

    
2261
static inline void tlb_set_dirty(CPUState *env,
2262
                                 unsigned long addr, target_ulong vaddr)
2263
{
2264
}
2265
#endif /* defined(CONFIG_USER_ONLY) */
2266

    
2267
#if !defined(CONFIG_USER_ONLY)
2268

    
2269
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2270
                             ram_addr_t memory, ram_addr_t region_offset);
2271
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2272
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2273
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2274
                      need_subpage)                                     \
2275
    do {                                                                \
2276
        if (addr > start_addr)                                          \
2277
            start_addr2 = 0;                                            \
2278
        else {                                                          \
2279
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2280
            if (start_addr2 > 0)                                        \
2281
                need_subpage = 1;                                       \
2282
        }                                                               \
2283
                                                                        \
2284
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2285
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2286
        else {                                                          \
2287
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2288
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2289
                need_subpage = 1;                                       \
2290
        }                                                               \
2291
    } while (0)
2292

    
2293
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
2300
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2301
                                         ram_addr_t size,
2302
                                         ram_addr_t phys_offset,
2303
                                         ram_addr_t region_offset)
2304
{
2305
    target_phys_addr_t addr, end_addr;
2306
    PhysPageDesc *p;
2307
    CPUState *env;
2308
    ram_addr_t orig_size = size;
2309
    void *subpage;
2310

    
2311
#ifdef USE_KQEMU
2312
    /* XXX: should not depend on cpu context */
2313
    env = first_cpu;
2314
    if (env->kqemu_enabled) {
2315
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2316
    }
2317
#endif
2318
    if (kvm_enabled())
2319
        kvm_set_phys_mem(start_addr, size, phys_offset);
2320

    
2321
    if (phys_offset == IO_MEM_UNASSIGNED) {
2322
        region_offset = start_addr;
2323
    }
2324
    region_offset &= TARGET_PAGE_MASK;
2325
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2326
    end_addr = start_addr + (target_phys_addr_t)size;
2327
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2328
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2329
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2330
            ram_addr_t orig_memory = p->phys_offset;
2331
            target_phys_addr_t start_addr2, end_addr2;
2332
            int need_subpage = 0;
2333

    
2334
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2335
                          need_subpage);
2336
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2337
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2338
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2339
                                           &p->phys_offset, orig_memory,
2340
                                           p->region_offset);
2341
                } else {
2342
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2343
                                            >> IO_MEM_SHIFT];
2344
                }
2345
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2346
                                 region_offset);
2347
                p->region_offset = 0;
2348
            } else {
2349
                p->phys_offset = phys_offset;
2350
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2351
                    (phys_offset & IO_MEM_ROMD))
2352
                    phys_offset += TARGET_PAGE_SIZE;
2353
            }
2354
        } else {
2355
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2356
            p->phys_offset = phys_offset;
2357
            p->region_offset = region_offset;
2358
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2359
                (phys_offset & IO_MEM_ROMD)) {
2360
                phys_offset += TARGET_PAGE_SIZE;
2361
            } else {
2362
                target_phys_addr_t start_addr2, end_addr2;
2363
                int need_subpage = 0;
2364

    
2365
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2366
                              end_addr2, need_subpage);
2367

    
2368
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2369
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2370
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2371
                                           addr & TARGET_PAGE_MASK);
2372
                    subpage_register(subpage, start_addr2, end_addr2,
2373
                                     phys_offset, region_offset);
2374
                    p->region_offset = 0;
2375
                }
2376
            }
2377
        }
2378
        region_offset += TARGET_PAGE_SIZE;
2379
    }
2380

    
2381
    /* since each CPU stores ram addresses in its TLB cache, we must
2382
       reset the modified entries */
2383
    /* XXX: slow ! */
2384
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2385
        tlb_flush(env, 1);
2386
    }
2387
}
2388
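/* Illustrative sketch (not part of the original source): how board setup
   code might back part of the guest physical address space with RAM and a
   ROM, using qemu_ram_alloc() (defined below, declared in the headers) and
   cpu_register_physical_memory_offset() above.  The sizes and addresses are
   made up; example_map_ram_and_rom() is a hypothetical name. */
static void example_map_ram_and_rom(void)
{
    ram_addr_t ram_offset, rom_offset;

    /* 64 KB of ordinary RAM at guest physical address 0 */
    ram_offset = qemu_ram_alloc(0x10000);
    cpu_register_physical_memory_offset(0x00000000, 0x10000,
                                        ram_offset | IO_MEM_RAM, 0);

    /* 16 KB of ROM near the top of a 32 bit physical address space */
    rom_offset = qemu_ram_alloc(0x4000);
    cpu_register_physical_memory_offset(0xfffc0000, 0x4000,
                                        rom_offset | IO_MEM_ROM, 0);
}
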

    
2389
/* XXX: temporary until new memory mapping API */
2390
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2391
{
2392
    PhysPageDesc *p;
2393

    
2394
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2395
    if (!p)
2396
        return IO_MEM_UNASSIGNED;
2397
    return p->phys_offset;
2398
}
2399

    
2400
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2401
{
2402
    if (kvm_enabled())
2403
        kvm_coalesce_mmio_region(addr, size);
2404
}
2405

    
2406
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407
{
2408
    if (kvm_enabled())
2409
        kvm_uncoalesce_mmio_region(addr, size);
2410
}
2411

    
2412
/* XXX: better than nothing */
2413
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2414
{
2415
    ram_addr_t addr;
2416
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2417
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2418
                (uint64_t)size, (uint64_t)phys_ram_size);
2419
        abort();
2420
    }
2421
    addr = phys_ram_alloc_offset;
2422
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2423
    return addr;
2424
}
2425

    
2426
void qemu_ram_free(ram_addr_t addr)
2427
{
2428
}
2429

    
2430
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   This may only be used if you actually allocated the ram, and
   already know how big the ram block is.  */
2433
void *qemu_get_ram_ptr(ram_addr_t addr)
2434
{
2435
    return phys_ram_base + addr;
2436
}
2437

    
2438
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2439
{
2440
#ifdef DEBUG_UNASSIGNED
2441
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2442
#endif
2443
#if defined(TARGET_SPARC)
2444
    do_unassigned_access(addr, 0, 0, 0, 1);
2445
#endif
2446
    return 0;
2447
}
2448

    
2449
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2450
{
2451
#ifdef DEBUG_UNASSIGNED
2452
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2453
#endif
2454
#if defined(TARGET_SPARC)
2455
    do_unassigned_access(addr, 0, 0, 0, 2);
2456
#endif
2457
    return 0;
2458
}
2459

    
2460
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2461
{
2462
#ifdef DEBUG_UNASSIGNED
2463
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2464
#endif
2465
#if defined(TARGET_SPARC)
2466
    do_unassigned_access(addr, 0, 0, 0, 4);
2467
#endif
2468
    return 0;
2469
}
2470

    
2471
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2472
{
2473
#ifdef DEBUG_UNASSIGNED
2474
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2475
#endif
2476
#if defined(TARGET_SPARC)
2477
    do_unassigned_access(addr, 1, 0, 0, 1);
2478
#endif
2479
}
2480

    
2481
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2482
{
2483
#ifdef DEBUG_UNASSIGNED
2484
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2485
#endif
2486
#if defined(TARGET_SPARC)
2487
    do_unassigned_access(addr, 1, 0, 0, 2);
2488
#endif
2489
}
2490

    
2491
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2492
{
2493
#ifdef DEBUG_UNASSIGNED
2494
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2495
#endif
2496
#if defined(TARGET_SPARC)
2497
    do_unassigned_access(addr, 1, 0, 0, 4);
2498
#endif
2499
}
2500

    
2501
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2502
    unassigned_mem_readb,
2503
    unassigned_mem_readw,
2504
    unassigned_mem_readl,
2505
};
2506

    
2507
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2508
    unassigned_mem_writeb,
2509
    unassigned_mem_writew,
2510
    unassigned_mem_writel,
2511
};
2512

    
2513
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2514
                                uint32_t val)
2515
{
2516
    int dirty_flags;
2517
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2518
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2519
#if !defined(CONFIG_USER_ONLY)
2520
        tb_invalidate_phys_page_fast(ram_addr, 1);
2521
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2522
#endif
2523
    }
2524
    stb_p(phys_ram_base + ram_addr, val);
2525
#ifdef USE_KQEMU
2526
    if (cpu_single_env->kqemu_enabled &&
2527
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2528
        kqemu_modify_page(cpu_single_env, ram_addr);
2529
#endif
2530
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2531
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2532
    /* we remove the notdirty callback only if the code has been
2533
       flushed */
2534
    if (dirty_flags == 0xff)
2535
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2536
}
2537

    
2538
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2539
                                uint32_t val)
2540
{
2541
    int dirty_flags;
2542
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2543
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2544
#if !defined(CONFIG_USER_ONLY)
2545
        tb_invalidate_phys_page_fast(ram_addr, 2);
2546
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2547
#endif
2548
    }
2549
    stw_p(phys_ram_base + ram_addr, val);
2550
#ifdef USE_KQEMU
2551
    if (cpu_single_env->kqemu_enabled &&
2552
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2553
        kqemu_modify_page(cpu_single_env, ram_addr);
2554
#endif
2555
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2556
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2557
    /* we remove the notdirty callback only if the code has been
2558
       flushed */
2559
    if (dirty_flags == 0xff)
2560
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2561
}
2562

    
2563
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2564
                                uint32_t val)
2565
{
2566
    int dirty_flags;
2567
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2568
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2569
#if !defined(CONFIG_USER_ONLY)
2570
        tb_invalidate_phys_page_fast(ram_addr, 4);
2571
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2572
#endif
2573
    }
2574
    stl_p(phys_ram_base + ram_addr, val);
2575
#ifdef USE_KQEMU
2576
    if (cpu_single_env->kqemu_enabled &&
2577
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2578
        kqemu_modify_page(cpu_single_env, ram_addr);
2579
#endif
2580
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2581
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2582
    /* we remove the notdirty callback only if the code has been
2583
       flushed */
2584
    if (dirty_flags == 0xff)
2585
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2586
}
2587

    
2588
static CPUReadMemoryFunc *error_mem_read[3] = {
2589
    NULL, /* never used */
2590
    NULL, /* never used */
2591
    NULL, /* never used */
2592
};
2593

    
2594
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2595
    notdirty_mem_writeb,
2596
    notdirty_mem_writew,
2597
    notdirty_mem_writel,
2598
};
2599

    
2600
/* Generate a debug exception if a watchpoint has been hit.  */
2601
static void check_watchpoint(int offset, int len_mask, int flags)
2602
{
2603
    CPUState *env = cpu_single_env;
2604
    target_ulong pc, cs_base;
2605
    TranslationBlock *tb;
2606
    target_ulong vaddr;
2607
    CPUWatchpoint *wp;
2608
    int cpu_flags;
2609

    
2610
    if (env->watchpoint_hit) {
2611
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2614
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2615
        return;
2616
    }
2617
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2618
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2619
        if ((vaddr == (wp->vaddr & len_mask) ||
2620
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2621
            wp->flags |= BP_WATCHPOINT_HIT;
2622
            if (!env->watchpoint_hit) {
2623
                env->watchpoint_hit = wp;
2624
                tb = tb_find_pc(env->mem_io_pc);
2625
                if (!tb) {
2626
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2627
                              "pc=%p", (void *)env->mem_io_pc);
2628
                }
2629
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2630
                tb_phys_invalidate(tb, -1);
2631
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2632
                    env->exception_index = EXCP_DEBUG;
2633
                } else {
2634
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2635
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2636
                }
2637
                cpu_resume_from_signal(env, NULL);
2638
            }
2639
        } else {
2640
            wp->flags &= ~BP_WATCHPOINT_HIT;
2641
        }
2642
    }
2643
}
2644

    
2645
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2646
   so these check for a hit then pass through to the normal out-of-line
2647
   phys routines.  */
2648
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2649
{
2650
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2651
    return ldub_phys(addr);
2652
}
2653

    
2654
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2655
{
2656
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2657
    return lduw_phys(addr);
2658
}
2659

    
2660
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2661
{
2662
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2663
    return ldl_phys(addr);
2664
}
2665

    
2666
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2667
                             uint32_t val)
2668
{
2669
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2670
    stb_phys(addr, val);
2671
}
2672

    
2673
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2674
                             uint32_t val)
2675
{
2676
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2677
    stw_phys(addr, val);
2678
}
2679

    
2680
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2681
                             uint32_t val)
2682
{
2683
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2684
    stl_phys(addr, val);
2685
}
2686

    
2687
static CPUReadMemoryFunc *watch_mem_read[3] = {
2688
    watch_mem_readb,
2689
    watch_mem_readw,
2690
    watch_mem_readl,
2691
};
2692

    
2693
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2694
    watch_mem_writeb,
2695
    watch_mem_writew,
2696
    watch_mem_writel,
2697
};
2698
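/* Illustrative sketch (not part of the original source): arming a data write
   watchpoint so that guest stores to the watched address are routed through
   the watch_mem_* handlers above and reported by check_watchpoint().  The
   argument shape follows the cpu_watchpoint_insert() call in cpu_copy();
   example_watch_guest_word() is a hypothetical name. */
static void example_watch_guest_word(CPUState *env1, target_ulong vaddr)
{
    /* 4 byte watchpoint, trap on writes, stop before the access completes */
    cpu_watchpoint_insert(env1, vaddr, 4,
                          BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS, NULL);
}
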

    
2699
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2700
                                 unsigned int len)
2701
{
2702
    uint32_t ret;
2703
    unsigned int idx;
2704

    
2705
    idx = SUBPAGE_IDX(addr);
2706
#if defined(DEBUG_SUBPAGE)
2707
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2708
           mmio, len, addr, idx);
2709
#endif
2710
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2711
                                       addr + mmio->region_offset[idx][0][len]);
2712

    
2713
    return ret;
2714
}
2715

    
2716
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2717
                              uint32_t value, unsigned int len)
2718
{
2719
    unsigned int idx;
2720

    
2721
    idx = SUBPAGE_IDX(addr);
2722
#if defined(DEBUG_SUBPAGE)
2723
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2724
           mmio, len, addr, idx, value);
2725
#endif
2726
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2727
                                  addr + mmio->region_offset[idx][1][len],
2728
                                  value);
2729
}
2730

    
2731
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2732
{
2733
#if defined(DEBUG_SUBPAGE)
2734
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2735
#endif
2736

    
2737
    return subpage_readlen(opaque, addr, 0);
2738
}
2739

    
2740
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2741
                            uint32_t value)
2742
{
2743
#if defined(DEBUG_SUBPAGE)
2744
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2745
#endif
2746
    subpage_writelen(opaque, addr, value, 0);
2747
}
2748

    
2749
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2750
{
2751
#if defined(DEBUG_SUBPAGE)
2752
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2753
#endif
2754

    
2755
    return subpage_readlen(opaque, addr, 1);
2756
}
2757

    
2758
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2759
                            uint32_t value)
2760
{
2761
#if defined(DEBUG_SUBPAGE)
2762
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2763
#endif
2764
    subpage_writelen(opaque, addr, value, 1);
2765
}
2766

    
2767
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2768
{
2769
#if defined(DEBUG_SUBPAGE)
2770
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2771
#endif
2772

    
2773
    return subpage_readlen(opaque, addr, 2);
2774
}
2775

    
2776
static void subpage_writel (void *opaque,
2777
                         target_phys_addr_t addr, uint32_t value)
2778
{
2779
#if defined(DEBUG_SUBPAGE)
2780
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2781
#endif
2782
    subpage_writelen(opaque, addr, value, 2);
2783
}
2784

    
2785
static CPUReadMemoryFunc *subpage_read[] = {
2786
    &subpage_readb,
2787
    &subpage_readw,
2788
    &subpage_readl,
2789
};
2790

    
2791
static CPUWriteMemoryFunc *subpage_write[] = {
2792
    &subpage_writeb,
2793
    &subpage_writew,
2794
    &subpage_writel,
2795
};
2796

    
2797
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2798
                             ram_addr_t memory, ram_addr_t region_offset)
2799
{
2800
    int idx, eidx;
2801
    unsigned int i;
2802

    
2803
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2804
        return -1;
2805
    idx = SUBPAGE_IDX(start);
2806
    eidx = SUBPAGE_IDX(end);
2807
#if defined(DEBUG_SUBPAGE)
2808
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2809
           mmio, start, end, idx, eidx, memory);
2810
#endif
2811
    memory >>= IO_MEM_SHIFT;
2812
    for (; idx <= eidx; idx++) {
2813
        for (i = 0; i < 4; i++) {
2814
            if (io_mem_read[memory][i]) {
2815
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2816
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2817
                mmio->region_offset[idx][0][i] = region_offset;
2818
            }
2819
            if (io_mem_write[memory][i]) {
2820
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2821
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2822
                mmio->region_offset[idx][1][i] = region_offset;
2823
            }
2824
        }
2825
    }
2826

    
2827
    return 0;
2828
}
2829

    
2830
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2831
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2832
{
2833
    subpage_t *mmio;
2834
    int subpage_memory;
2835

    
2836
    mmio = qemu_mallocz(sizeof(subpage_t));
2837

    
2838
    mmio->base = base;
2839
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2840
#if defined(DEBUG_SUBPAGE)
2841
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2842
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2843
#endif
2844
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2845
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2846
                         region_offset);
2847

    
2848
    return mmio;
2849
}
2850

    
2851
static int get_free_io_mem_idx(void)
2852
{
2853
    int i;
2854

    
2855
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2856
        if (!io_mem_used[i]) {
2857
            io_mem_used[i] = 1;
2858
            return i;
2859
        }
2860

    
2861
    return -1;
2862
}
2863

    
2864
static void io_mem_init(void)
2865
{
2866
    int i;
2867

    
2868
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2869
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2870
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2871
    for (i=0; i<5; i++)
2872
        io_mem_used[i] = 1;
2873

    
2874
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2875
                                          watch_mem_write, NULL);
2876
    /* alloc dirty bits array */
2877
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2878
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2879
}
2880

    
2881
/* mem_read and mem_write are arrays of functions containing the
2882
   function to access byte (index 0), word (index 1) and dword (index
2883
   2). Functions can be omitted with a NULL function pointer. The
2884
   registered functions may be modified dynamically later.
2885
   If io_index is non zero, the corresponding io zone is
2886
   modified. If it is zero, a new io zone is allocated. The return
2887
   value can be used with cpu_register_physical_memory(). (-1) is
2888
   returned if error. */
2889
int cpu_register_io_memory(int io_index,
2890
                           CPUReadMemoryFunc **mem_read,
2891
                           CPUWriteMemoryFunc **mem_write,
2892
                           void *opaque)
2893
{
2894
    int i, subwidth = 0;
2895

    
2896
    if (io_index <= 0) {
2897
        io_index = get_free_io_mem_idx();
2898
        if (io_index == -1)
2899
            return io_index;
2900
    } else {
2901
        if (io_index >= IO_MEM_NB_ENTRIES)
2902
            return -1;
2903
    }
2904

    
2905
    for(i = 0;i < 3; i++) {
2906
        if (!mem_read[i] || !mem_write[i])
2907
            subwidth = IO_MEM_SUBWIDTH;
2908
        io_mem_read[io_index][i] = mem_read[i];
2909
        io_mem_write[io_index][i] = mem_write[i];
2910
    }
2911
    io_mem_opaque[io_index] = opaque;
2912
    return (io_index << IO_MEM_SHIFT) | subwidth;
2913
}
2914
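/* Illustrative sketch (not part of the original source): how a device model
   registers 32 bit only MMIO handlers with cpu_register_io_memory() and maps
   them with cpu_register_physical_memory_offset().  The register, callbacks
   and mapping address are hypothetical; omitting the byte and word handlers
   marks the region IO_MEM_SUBWIDTH, as described above. */
static uint32_t example_dev_reg;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return example_dev_reg;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    example_dev_reg = val;
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL,                       /* no byte access */
    NULL,                       /* no word access */
    example_dev_readl,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL,
    NULL,
    example_dev_writel,
};

static void example_dev_map(target_phys_addr_t base)
{
    int io = cpu_register_io_memory(0, example_dev_read,
                                    example_dev_write, NULL);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
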

    
2915
void cpu_unregister_io_memory(int io_table_address)
2916
{
2917
    int i;
2918
    int io_index = io_table_address >> IO_MEM_SHIFT;
2919

    
2920
    for (i=0;i < 3; i++) {
2921
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2922
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2923
    }
2924
    io_mem_opaque[io_index] = NULL;
2925
    io_mem_used[io_index] = 0;
2926
}
2927

    
2928
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2929
{
2930
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2931
}
2932

    
2933
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2934
{
2935
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2936
}
2937

    
2938
#endif /* !defined(CONFIG_USER_ONLY) */
2939

    
2940
/* physical memory access (slow version, mainly for debug) */
2941
#if defined(CONFIG_USER_ONLY)
2942
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2943
                            int len, int is_write)
2944
{
2945
    int l, flags;
2946
    target_ulong page;
2947
    void * p;
2948

    
2949
    while (len > 0) {
2950
        page = addr & TARGET_PAGE_MASK;
2951
        l = (page + TARGET_PAGE_SIZE) - addr;
2952
        if (l > len)
2953
            l = len;
2954
        flags = page_get_flags(page);
2955
        if (!(flags & PAGE_VALID))
2956
            return;
2957
        if (is_write) {
2958
            if (!(flags & PAGE_WRITE))
2959
                return;
2960
            /* XXX: this code should not depend on lock_user */
2961
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2962
                /* FIXME - should this return an error rather than just fail? */
2963
                return;
2964
            memcpy(p, buf, l);
2965
            unlock_user(p, addr, l);
2966
        } else {
2967
            if (!(flags & PAGE_READ))
2968
                return;
2969
            /* XXX: this code should not depend on lock_user */
2970
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2971
                /* FIXME - should this return an error rather than just fail? */
2972
                return;
2973
            memcpy(buf, p, l);
2974
            unlock_user(p, addr, 0);
2975
        }
2976
        len -= l;
2977
        buf += l;
2978
        addr += l;
2979
    }
2980
}
2981

    
2982
#else
2983
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2984
                            int len, int is_write)
2985
{
2986
    int l, io_index;
2987
    uint8_t *ptr;
2988
    uint32_t val;
2989
    target_phys_addr_t page;
2990
    unsigned long pd;
2991
    PhysPageDesc *p;
2992

    
2993
    while (len > 0) {
2994
        page = addr & TARGET_PAGE_MASK;
2995
        l = (page + TARGET_PAGE_SIZE) - addr;
2996
        if (l > len)
2997
            l = len;
2998
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2999
        if (!p) {
3000
            pd = IO_MEM_UNASSIGNED;
3001
        } else {
3002
            pd = p->phys_offset;
3003
        }
3004

    
3005
        if (is_write) {
3006
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3007
                target_phys_addr_t addr1 = addr;
3008
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3009
                if (p)
3010
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3011
                /* XXX: could force cpu_single_env to NULL to avoid
3012
                   potential bugs */
3013
                if (l >= 4 && ((addr1 & 3) == 0)) {
3014
                    /* 32 bit write access */
3015
                    val = ldl_p(buf);
3016
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3017
                    l = 4;
3018
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3019
                    /* 16 bit write access */
3020
                    val = lduw_p(buf);
3021
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3022
                    l = 2;
3023
                } else {
3024
                    /* 8 bit write access */
3025
                    val = ldub_p(buf);
3026
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3027
                    l = 1;
3028
                }
3029
            } else {
3030
                unsigned long addr1;
3031
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3032
                /* RAM case */
3033
                ptr = phys_ram_base + addr1;
3034
                memcpy(ptr, buf, l);
3035
                if (!cpu_physical_memory_is_dirty(addr1)) {
3036
                    /* invalidate code */
3037
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3038
                    /* set dirty bit */
3039
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3040
                        (0xff & ~CODE_DIRTY_FLAG);
3041
                }
3042
            }
3043
        } else {
3044
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3045
                !(pd & IO_MEM_ROMD)) {
3046
                target_phys_addr_t addr1 = addr;
3047
                /* I/O case */
3048
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3049
                if (p)
3050
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3051
                if (l >= 4 && ((addr1 & 3) == 0)) {
3052
                    /* 32 bit read access */
3053
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3054
                    stl_p(buf, val);
3055
                    l = 4;
3056
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3057
                    /* 16 bit read access */
3058
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3059
                    stw_p(buf, val);
3060
                    l = 2;
3061
                } else {
3062
                    /* 8 bit read access */
3063
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3064
                    stb_p(buf, val);
3065
                    l = 1;
3066
                }
3067
            } else {
3068
                /* RAM case */
3069
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3070
                    (addr & ~TARGET_PAGE_MASK);
3071
                memcpy(buf, ptr, l);
3072
            }
3073
        }
3074
        len -= l;
3075
        buf += l;
3076
        addr += l;
3077
    }
3078
}
3079
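/* Illustrative sketch (not part of the original source): copying a small
   block of guest physical memory into a host buffer with
   cpu_physical_memory_rw(), e.g. for a debugger or a device DMA path.
   example_dump_guest_mem() is a hypothetical name. */
static void example_dump_guest_mem(target_phys_addr_t addr, int len)
{
    uint8_t buf[64];
    int i;

    if (len > (int)sizeof(buf))
        len = sizeof(buf);
    cpu_physical_memory_rw(addr, buf, len, 0);   /* is_write = 0: read */
    for (i = 0; i < len; i++)
        printf("%02x ", buf[i]);
    printf("\n");
}
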

    
3080
/* used for ROM loading : can write in RAM and ROM */
3081
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3082
                                   const uint8_t *buf, int len)
3083
{
3084
    int l;
3085
    uint8_t *ptr;
3086
    target_phys_addr_t page;
3087
    unsigned long pd;
3088
    PhysPageDesc *p;
3089

    
3090
    while (len > 0) {
3091
        page = addr & TARGET_PAGE_MASK;
3092
        l = (page + TARGET_PAGE_SIZE) - addr;
3093
        if (l > len)
3094
            l = len;
3095
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3096
        if (!p) {
3097
            pd = IO_MEM_UNASSIGNED;
3098
        } else {
3099
            pd = p->phys_offset;
3100
        }
3101

    
3102
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3103
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3104
            !(pd & IO_MEM_ROMD)) {
3105
            /* do nothing */
3106
        } else {
3107
            unsigned long addr1;
3108
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3109
            /* ROM/RAM case */
3110
            ptr = phys_ram_base + addr1;
3111
            memcpy(ptr, buf, l);
3112
        }
3113
        len -= l;
3114
        buf += l;
3115
        addr += l;
3116
    }
3117
}
3118

    
3119
typedef struct {
3120
    void *buffer;
3121
    target_phys_addr_t addr;
3122
    target_phys_addr_t len;
3123
} BounceBuffer;
3124

    
3125
static BounceBuffer bounce;
3126

    
3127
typedef struct MapClient {
3128
    void *opaque;
3129
    void (*callback)(void *opaque);
3130
    LIST_ENTRY(MapClient) link;
3131
} MapClient;
3132

    
3133
static LIST_HEAD(map_client_list, MapClient) map_client_list
3134
    = LIST_HEAD_INITIALIZER(map_client_list);
3135

    
3136
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3137
{
3138
    MapClient *client = qemu_malloc(sizeof(*client));
3139

    
3140
    client->opaque = opaque;
3141
    client->callback = callback;
3142
    LIST_INSERT_HEAD(&map_client_list, client, link);
3143
    return client;
3144
}
3145

    
3146
void cpu_unregister_map_client(void *_client)
3147
{
3148
    MapClient *client = (MapClient *)_client;
3149

    
3150
    LIST_REMOVE(client, link);
3151
}
3152

    
3153
static void cpu_notify_map_clients(void)
3154
{
3155
    MapClient *client;
3156

    
3157
    while (!LIST_EMPTY(&map_client_list)) {
3158
        client = LIST_FIRST(&map_client_list);
3159
        client->callback(client->opaque);
3160
        LIST_REMOVE(client, link);
3161
    }
3162
}
3163

    
3164
/* Map a physical memory region into a host virtual address.
3165
 * May map a subset of the requested range, given by and returned in *plen.
3166
 * May return NULL if resources needed to perform the mapping are exhausted.
3167
 * Use only for reads OR writes - not for read-modify-write operations.
3168
 * Use cpu_register_map_client() to know when retrying the map operation is
3169
 * likely to succeed.
3170
 */
3171
void *cpu_physical_memory_map(target_phys_addr_t addr,
3172
                              target_phys_addr_t *plen,
3173
                              int is_write)
3174
{
3175
    target_phys_addr_t len = *plen;
3176
    target_phys_addr_t done = 0;
3177
    int l;
3178
    uint8_t *ret = NULL;
3179
    uint8_t *ptr;
3180
    target_phys_addr_t page;
3181
    unsigned long pd;
3182
    PhysPageDesc *p;
3183
    unsigned long addr1;
3184

    
3185
    while (len > 0) {
3186
        page = addr & TARGET_PAGE_MASK;
3187
        l = (page + TARGET_PAGE_SIZE) - addr;
3188
        if (l > len)
3189
            l = len;
3190
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3191
        if (!p) {
3192
            pd = IO_MEM_UNASSIGNED;
3193
        } else {
3194
            pd = p->phys_offset;
3195
        }
3196

    
3197
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3198
            if (done || bounce.buffer) {
3199
                break;
3200
            }
3201
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3202
            bounce.addr = addr;
3203
            bounce.len = l;
3204
            if (!is_write) {
3205
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3206
            }
3207
            ptr = bounce.buffer;
3208
        } else {
3209
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3210
            ptr = phys_ram_base + addr1;
3211
        }
3212
        if (!done) {
3213
            ret = ptr;
3214
        } else if (ret + done != ptr) {
3215
            break;
3216
        }
3217

    
3218
        len -= l;
3219
        addr += l;
3220
        done += l;
3221
    }
3222
    *plen = done;
3223
    return ret;
3224
}
3225

    
3226
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3227
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3228
 * the amount of memory that was actually read or written by the caller.
3229
 */
3230
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3231
                               int is_write, target_phys_addr_t access_len)
3232
{
3233
    if (buffer != bounce.buffer) {
3234
        if (is_write) {
3235
            unsigned long addr1 = (uint8_t *)buffer - phys_ram_base;
3236
            while (access_len) {
3237
                unsigned l;
3238
                l = TARGET_PAGE_SIZE;
3239
                if (l > access_len)
3240
                    l = access_len;
3241
                if (!cpu_physical_memory_is_dirty(addr1)) {
3242
                    /* invalidate code */
3243
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3244
                    /* set dirty bit */
3245
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3246
                        (0xff & ~CODE_DIRTY_FLAG);
3247
                }
3248
                addr1 += l;
3249
                access_len -= l;
3250
            }
3251
        }
3252
        return;
3253
    }
3254
    if (is_write) {
3255
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3256
    }
3257
    qemu_free(bounce.buffer);
3258
    bounce.buffer = NULL;
3259
    cpu_notify_map_clients();
3260
}
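/* Illustrative sketch (not part of the original exec.c): a hypothetical DMA
   routine showing the calling convention of cpu_physical_memory_map(),
   cpu_physical_memory_unmap() and the map-client retry hook above.  The
   helper names and the retry policy are invented; only the API calls come
   from this file. */
static void example_dma_retry_cb(void *opaque)
{
    /* invoked from cpu_notify_map_clients() once the bounce buffer is
       released by cpu_physical_memory_unmap(); a real device model would
       reschedule its pending transfer here */
}

static int example_dma_to_guest(target_phys_addr_t guest_addr,
                                const uint8_t *data, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    uint8_t *host;

    /* is_write == 1: we intend to write into the mapped guest memory */
    host = cpu_physical_memory_map(guest_addr, &plen, 1);
    if (!host) {
        /* resources (the single bounce buffer) are exhausted: register a
           callback and retry later; the client is removed automatically
           once it has been notified */
        cpu_register_map_client(NULL, example_dma_retry_cb);
        return -1;
    }
    /* plen may come back smaller than requested */
    memcpy(host, data, plen);
    /* access_len == plen: everything that was mapped was actually written */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return plen == len ? 0 : 1;   /* 1: caller must continue with the rest */
}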
3261

    
3262
/* warning: addr must be aligned */
3263
uint32_t ldl_phys(target_phys_addr_t addr)
3264
{
3265
    int io_index;
3266
    uint8_t *ptr;
3267
    uint32_t val;
3268
    unsigned long pd;
3269
    PhysPageDesc *p;
3270

    
3271
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3272
    if (!p) {
3273
        pd = IO_MEM_UNASSIGNED;
3274
    } else {
3275
        pd = p->phys_offset;
3276
    }
3277

    
3278
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3279
        !(pd & IO_MEM_ROMD)) {
3280
        /* I/O case */
3281
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3282
        if (p)
3283
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3284
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3285
    } else {
3286
        /* RAM case */
3287
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3288
            (addr & ~TARGET_PAGE_MASK);
3289
        val = ldl_p(ptr);
3290
    }
3291
    return val;
3292
}
3293

    
3294
/* warning: addr must be aligned */
3295
uint64_t ldq_phys(target_phys_addr_t addr)
3296
{
3297
    int io_index;
3298
    uint8_t *ptr;
3299
    uint64_t val;
3300
    unsigned long pd;
3301
    PhysPageDesc *p;
3302

    
3303
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3304
    if (!p) {
3305
        pd = IO_MEM_UNASSIGNED;
3306
    } else {
3307
        pd = p->phys_offset;
3308
    }
3309

    
3310
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3311
        !(pd & IO_MEM_ROMD)) {
3312
        /* I/O case */
3313
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3314
        if (p)
3315
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3316
#ifdef TARGET_WORDS_BIGENDIAN
3317
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3318
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3319
#else
3320
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3321
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3322
#endif
3323
    } else {
3324
        /* RAM case */
3325
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3326
            (addr & ~TARGET_PAGE_MASK);
3327
        val = ldq_p(ptr);
3328
    }
3329
    return val;
3330
}
3331

    
3332
/* XXX: optimize */
3333
uint32_t ldub_phys(target_phys_addr_t addr)
3334
{
3335
    uint8_t val;
3336
    cpu_physical_memory_read(addr, &val, 1);
3337
    return val;
3338
}
3339

    
3340
/* XXX: optimize */
3341
uint32_t lduw_phys(target_phys_addr_t addr)
3342
{
3343
    uint16_t val;
3344
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3345
    return tswap16(val);
3346
}
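/* Illustrative sketch (not part of the original exec.c): reading an
   invented in-guest descriptor with the ld*_phys helpers above.  The layout
   and names are hypothetical; the point is that these helpers resolve the
   physical page, dispatch to the I/O callbacks for MMIO addresses and
   byte-swap according to the target's endianness.  The alignment warnings
   above apply: desc_pa is assumed to be 8-byte aligned. */
static void example_read_descriptor(target_phys_addr_t desc_pa)
{
    uint64_t buf_pa = ldq_phys(desc_pa);        /* 64-bit field at +0  */
    uint32_t length = ldl_phys(desc_pa + 8);    /* 32-bit field at +8  */
    uint32_t flags  = lduw_phys(desc_pa + 12);  /* 16-bit field at +12 */

    (void)buf_pa; (void)length; (void)flags;    /* sketch only */
}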
3347

    
3348
/* warning: addr must be aligned. The ram page is not marked as dirty
3349
   and the code inside is not invalidated. It is useful if the dirty
3350
   bits are used to track modified PTEs */
3351
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3352
{
3353
    int io_index;
3354
    uint8_t *ptr;
3355
    unsigned long pd;
3356
    PhysPageDesc *p;
3357

    
3358
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3359
    if (!p) {
3360
        pd = IO_MEM_UNASSIGNED;
3361
    } else {
3362
        pd = p->phys_offset;
3363
    }
3364

    
3365
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3366
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3367
        if (p)
3368
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3369
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3370
    } else {
3371
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3372
        ptr = phys_ram_base + addr1;
3373
        stl_p(ptr, val);
3374

    
3375
        if (unlikely(in_migration)) {
3376
            if (!cpu_physical_memory_is_dirty(addr1)) {
3377
                /* invalidate code */
3378
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3379
                /* set dirty bit */
3380
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3381
                    (0xff & ~CODE_DIRTY_FLAG);
3382
            }
3383
        }
3384
    }
3385
}
3386

    
3387
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3388
{
3389
    int io_index;
3390
    uint8_t *ptr;
3391
    unsigned long pd;
3392
    PhysPageDesc *p;
3393

    
3394
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3395
    if (!p) {
3396
        pd = IO_MEM_UNASSIGNED;
3397
    } else {
3398
        pd = p->phys_offset;
3399
    }
3400

    
3401
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3402
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3403
        if (p)
3404
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3405
#ifdef TARGET_WORDS_BIGENDIAN
3406
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3407
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3408
#else
3409
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3410
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3411
#endif
3412
    } else {
3413
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3414
            (addr & ~TARGET_PAGE_MASK);
3415
        stq_p(ptr, val);
3416
    }
3417
}
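/* Illustrative sketch (not part of the original exec.c): the "notdirty"
   stores above are meant for the emulator's own page-table bookkeeping,
   e.g. a target MMU helper that sets an accessed flag in a guest PTE.
   Because the store neither sets the dirty bits nor invalidates translated
   code, machinery that watches the dirty bitmap for guest-modified PTEs is
   not tripped by these internal updates.  The PTE format and bit value
   below are invented. */
static void example_pte_set_accessed(target_phys_addr_t pte_pa)
{
    uint32_t pte = ldl_phys(pte_pa);

    if (!(pte & 0x20)) {                        /* hypothetical flag bit */
        stl_phys_notdirty(pte_pa, pte | 0x20);
    }
}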
3418

    
3419
/* warning: addr must be aligned */
3420
void stl_phys(target_phys_addr_t addr, uint32_t val)
3421
{
3422
    int io_index;
3423
    uint8_t *ptr;
3424
    unsigned long pd;
3425
    PhysPageDesc *p;
3426

    
3427
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3428
    if (!p) {
3429
        pd = IO_MEM_UNASSIGNED;
3430
    } else {
3431
        pd = p->phys_offset;
3432
    }
3433

    
3434
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3435
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3436
        if (p)
3437
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3438
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3439
    } else {
3440
        unsigned long addr1;
3441
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3442
        /* RAM case */
3443
        ptr = phys_ram_base + addr1;
3444
        stl_p(ptr, val);
3445
        if (!cpu_physical_memory_is_dirty(addr1)) {
3446
            /* invalidate code */
3447
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3448
            /* set dirty bit */
3449
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3450
                (0xff & ~CODE_DIRTY_FLAG);
3451
        }
3452
    }
3453
}
3454

    
3455
/* XXX: optimize */
3456
void stb_phys(target_phys_addr_t addr, uint32_t val)
3457
{
3458
    uint8_t v = val;
3459
    cpu_physical_memory_write(addr, &v, 1);
3460
}
3461

    
3462
/* XXX: optimize */
3463
void stw_phys(target_phys_addr_t addr, uint32_t val)
3464
{
3465
    uint16_t v = tswap16(val);
3466
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3467
}
3468

    
3469
/* XXX: optimize */
3470
void stq_phys(target_phys_addr_t addr, uint64_t val)
3471
{
3472
    val = tswap64(val);
3473
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3474
}
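/* Illustrative sketch (not part of the original exec.c): ordinary
   guest-visible stores from a device model should go through the st*_phys
   helpers above rather than writing phys_ram_base directly, so that MMIO
   dispatch, the dirty bitmap and the invalidation of translated code on the
   page are all taken care of.  The status word and its address are
   invented. */
static void example_post_status(target_phys_addr_t status_pa, uint32_t status)
{
    stl_phys(status_pa, status);
}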
3475

    
3476
#endif
3477

    
3478
/* virtual memory access for debug (includes writing to ROM) */
3479
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3480
                        uint8_t *buf, int len, int is_write)
3481
{
3482
    int l;
3483
    target_phys_addr_t phys_addr;
3484
    target_ulong page;
3485

    
3486
    while (len > 0) {
3487
        page = addr & TARGET_PAGE_MASK;
3488
        phys_addr = cpu_get_phys_page_debug(env, page);
3489
        /* if no physical page mapped, return an error */
3490
        if (phys_addr == -1)
3491
            return -1;
3492
        l = (page + TARGET_PAGE_SIZE) - addr;
3493
        if (l > len)
3494
            l = len;
3495
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3496
#if !defined(CONFIG_USER_ONLY)
3497
        if (is_write)
3498
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3499
        else
3500
#endif
3501
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3502
        len -= l;
3503
        buf += l;
3504
        addr += l;
3505
    }
3506
    return 0;
3507
}
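/* Illustrative sketch (not part of the original exec.c): a debugger front
   end (the gdb stub, for instance) reads guest *virtual* memory through
   cpu_memory_rw_debug(), which resolves the guest mapping page by page via
   cpu_get_phys_page_debug().  The helper name is invented; bytes are copied
   verbatim, so any byte swapping is up to the caller. */
static int example_debug_read_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    /* returns -1 if some page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out, 4, 0);
}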
3508

    
3509
/* in deterministic execution mode, instructions performing device I/O
3510
   must be at the end of the TB */
3511
void cpu_io_recompile(CPUState *env, void *retaddr)
3512
{
3513
    TranslationBlock *tb;
3514
    uint32_t n, cflags;
3515
    target_ulong pc, cs_base;
3516
    uint64_t flags;
3517

    
3518
    tb = tb_find_pc((unsigned long)retaddr);
3519
    if (!tb) {
3520
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
3521
                  retaddr);
3522
    }
3523
    n = env->icount_decr.u16.low + tb->icount;
3524
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3525
    /* Calculate how many instructions had been executed before the fault
3526
       occurred.  */
3527
    n = n - env->icount_decr.u16.low;
3528
    /* Generate a new TB ending on the I/O insn.  */
3529
    n++;
3530
    /* On MIPS and SH, delay slot instructions can only be restarted if
3531
       they were already the first instruction in the TB.  If this is not
3532
       the first instruction in a TB then re-execute the preceding
3533
       branch.  */
3534
#if defined(TARGET_MIPS)
3535
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3536
        env->active_tc.PC -= 4;
3537
        env->icount_decr.u16.low++;
3538
        env->hflags &= ~MIPS_HFLAG_BMASK;
3539
    }
3540
#elif defined(TARGET_SH4)
3541
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3542
            && n > 1) {
3543
        env->pc -= 2;
3544
        env->icount_decr.u16.low++;
3545
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3546
    }
3547
#endif
3548
    /* This should never happen.  */
3549
    if (n > CF_COUNT_MASK)
3550
        cpu_abort(env, "TB too big during recompile");
3551

    
3552
    cflags = n | CF_LAST_IO;
3553
    pc = tb->pc;
3554
    cs_base = tb->cs_base;
3555
    flags = tb->flags;
3556
    tb_phys_invalidate(tb, -1);
3557
    /* FIXME: In theory this could raise an exception.  In practice
3558
       we have already translated the block once so it's probably ok.  */
3559
    tb_gen_code(env, pc, cs_base, flags, cflags);
3560
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3561
       the first in the TB) then we end up generating a whole new TB and
3562
       repeating the fault, which is horribly inefficient.
3563
       Better would be to execute just this insn uncached, or generate a
3564
       second new TB.  */
3565
    cpu_resume_from_signal(env, NULL);
3566
}
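/* Editorial worked example (not from the original source) of the icount
   arithmetic above: suppose a TB of tb->icount = 7 instructions starts with
   a budget of 10, so the prologue leaves icount_decr.u16.low = 3, and the
   5th instruction performs the I/O access.  Then n = 3 + 7 = 10 recovers
   the budget at TB entry; cpu_restore_state() rewinds the counter to
   10 - 4 = 6 to account for the four instructions that did complete, so
   n = 10 - 6 = 4, and n++ makes it 5: the retranslated TB ends exactly on
   the I/O instruction and is flagged with CF_LAST_IO. */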
3567

    
3568
void dump_exec_info(FILE *f,
3569
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
3570
{
3571
    int i, target_code_size, max_target_code_size;
3572
    int direct_jmp_count, direct_jmp2_count, cross_page;
3573
    TranslationBlock *tb;
3574

    
3575
    target_code_size = 0;
3576
    max_target_code_size = 0;
3577
    cross_page = 0;
3578
    direct_jmp_count = 0;
3579
    direct_jmp2_count = 0;
3580
    for(i = 0; i < nb_tbs; i++) {
3581
        tb = &tbs[i];
3582
        target_code_size += tb->size;
3583
        if (tb->size > max_target_code_size)
3584
            max_target_code_size = tb->size;
3585
        if (tb->page_addr[1] != -1)
3586
            cross_page++;
3587
        if (tb->tb_next_offset[0] != 0xffff) {
3588
            direct_jmp_count++;
3589
            if (tb->tb_next_offset[1] != 0xffff) {
3590
                direct_jmp2_count++;
3591
            }
3592
        }
3593
    }
3594
    /* XXX: avoid using doubles ? */
3595
    cpu_fprintf(f, "Translation buffer state:\n");
3596
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
3597
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
3598
    cpu_fprintf(f, "TB count            %d/%d\n", 
3599
                nb_tbs, code_gen_max_blocks);
3600
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
3601
                nb_tbs ? target_code_size / nb_tbs : 0,
3602
                max_target_code_size);
3603
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
3604
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
3605
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
3606
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
3607
            cross_page,
3608
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
3609
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
3610
                direct_jmp_count,
3611
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
3612
                direct_jmp2_count,
3613
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
3614
    cpu_fprintf(f, "\nStatistics:\n");
3615
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
3616
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
3617
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
3618
    tcg_dump_info(f, cpu_fprintf);
3619
}
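/* Illustrative sketch (not part of the original exec.c): dump_exec_info()
   only needs a stream and an fprintf-style callback, so ad-hoc debugging
   code can obtain the translation and TLB statistics with a one-liner (the
   monitor's "info jit" output comes from the same routine). */
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}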
3620

    
3621
#if !defined(CONFIG_USER_ONLY)
3622

    
3623
#define MMUSUFFIX _cmmu
3624
#define GETPC() NULL
3625
#define env cpu_single_env
3626
#define SOFTMMU_CODE_ACCESS
3627

    
3628
#define SHIFT 0
3629
#include "softmmu_template.h"
3630

    
3631
#define SHIFT 1
3632
#include "softmmu_template.h"
3633

    
3634
#define SHIFT 2
3635
#include "softmmu_template.h"
3636

    
3637
#define SHIFT 3
3638
#include "softmmu_template.h"
3639

    
3640
#undef env
3641

    
3642
#endif