Statistics
| Branch: | Revision:

root / exec.c @ 2507c12a

History | View | Annotate | Download (126 kB)

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26

    
27
#include "qemu-common.h"
28
#include "cpu.h"
29
#include "exec-all.h"
30
#include "tcg.h"
31
#include "hw/hw.h"
32
#include "hw/qdev.h"
33
#include "osdep.h"
34
#include "kvm.h"
35
#include "qemu-timer.h"
36
#if defined(CONFIG_USER_ONLY)
37
#include <qemu.h>
38
#include <signal.h>
39
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
40
#include <sys/param.h>
41
#if __FreeBSD_version >= 700104
42
#define HAVE_KINFO_GETVMMAP
43
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
44
#include <sys/time.h>
45
#include <sys/proc.h>
46
#include <machine/profile.h>
47
#define _KERNEL
48
#include <sys/user.h>
49
#undef _KERNEL
50
#undef sigqueue
51
#include <libutil.h>
52
#endif
53
#endif
54
#endif
55

    
56
//#define DEBUG_TB_INVALIDATE
57
//#define DEBUG_FLUSH
58
//#define DEBUG_TLB
59
//#define DEBUG_UNASSIGNED
60

    
61
/* make various TB consistency checks */
62
//#define DEBUG_TB_CHECK
63
//#define DEBUG_TLB_CHECK
64

    
65
//#define DEBUG_IOPORT
66
//#define DEBUG_SUBPAGE
67

    
68
#if !defined(CONFIG_USER_ONLY)
69
/* TB consistency checks only implemented for usermode emulation.  */
70
#undef DEBUG_TB_CHECK
71
#endif
72

    
73
#define SMC_BITMAP_USE_THRESHOLD 10
74

    
75
static TranslationBlock *tbs;
76
static int code_gen_max_blocks;
77
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
78
static int nb_tbs;
79
/* any access to the tbs or the page table must use this lock */
80
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
81

    
82
#if defined(__arm__) || defined(__sparc_v9__)
83
/* The prologue must be reachable with a direct jump. ARM and Sparc64
84
 have limited branch ranges (possibly also PPC) so place it in a
85
 section close to code segment. */
86
#define code_gen_section                                \
87
    __attribute__((__section__(".gen_code")))           \
88
    __attribute__((aligned (32)))
89
#elif defined(_WIN32)
90
/* Maximum alignment for Win32 is 16. */
91
#define code_gen_section                                \
92
    __attribute__((aligned (16)))
93
#else
94
#define code_gen_section                                \
95
    __attribute__((aligned (32)))
96
#endif
97

    
98
uint8_t code_gen_prologue[1024] code_gen_section;
99
static uint8_t *code_gen_buffer;
100
static unsigned long code_gen_buffer_size;
101
/* threshold to flush the translated code buffer */
102
static unsigned long code_gen_buffer_max_size;
103
static uint8_t *code_gen_ptr;
104

    
105
#if !defined(CONFIG_USER_ONLY)
106
int phys_ram_fd;
107
static int in_migration;
108

    
109
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
110
#endif
111

    
112
CPUState *first_cpu;
113
/* current CPU in the current thread. It is only valid inside
114
   cpu_exec() */
115
CPUState *cpu_single_env;
116
/* 0 = Do not count executed instructions.
117
   1 = Precise instruction counting.
118
   2 = Adaptive rate instruction counting.  */
119
int use_icount = 0;
120
/* Current instruction counter.  While executing translated code this may
121
   include some instructions that have not yet been executed.  */
122
int64_t qemu_icount;
123

    
124
typedef struct PageDesc {
125
    /* list of TBs intersecting this ram page */
126
    TranslationBlock *first_tb;
127
    /* in order to optimize self modifying code, we count the number
128
       of lookups we do to a given page to use a bitmap */
129
    unsigned int code_write_count;
130
    uint8_t *code_bitmap;
131
#if defined(CONFIG_USER_ONLY)
132
    unsigned long flags;
133
#endif
134
} PageDesc;
135

    
136
/* In system mode we want L1_MAP to be based on ram offsets,
137
   while in user mode we want it to be based on virtual addresses.  */
138
#if !defined(CONFIG_USER_ONLY)
139
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
140
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
141
#else
142
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
143
#endif
144
#else
145
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
146
#endif
147

    
148
/* Size of the L2 (and L3, etc) page tables.  */
149
#define L2_BITS 10
150
#define L2_SIZE (1 << L2_BITS)
151

    
152
/* The bits remaining after N lower levels of page tables.  */
153
#define P_L1_BITS_REM \
154
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
155
#define V_L1_BITS_REM \
156
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157

    
158
/* Size of the L1 page table.  Avoid silly small sizes.  */
159
#if P_L1_BITS_REM < 4
160
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
161
#else
162
#define P_L1_BITS  P_L1_BITS_REM
163
#endif
164

    
165
#if V_L1_BITS_REM < 4
166
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
167
#else
168
#define V_L1_BITS  V_L1_BITS_REM
169
#endif
170

    
171
#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
172
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)
173

    
174
#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
175
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
176

    
177
unsigned long qemu_real_host_page_size;
178
unsigned long qemu_host_page_bits;
179
unsigned long qemu_host_page_size;
180
unsigned long qemu_host_page_mask;
181

    
182
/* This is a multi-level map on the virtual address space.
183
   The bottom level has pointers to PageDesc.  */
184
static void *l1_map[V_L1_SIZE];
185

    
186
#if !defined(CONFIG_USER_ONLY)
187
typedef struct PhysPageDesc {
188
    /* offset in host memory of the page + io_index in the low bits */
189
    ram_addr_t phys_offset;
190
    ram_addr_t region_offset;
191
} PhysPageDesc;
192

    
193
/* This is a multi-level map on the physical address space.
194
   The bottom level has pointers to PhysPageDesc.  */
195
static void *l1_phys_map[P_L1_SIZE];
196

    
197
static void io_mem_init(void);
198

    
199
/* io memory support */
200
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
201
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
202
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
203
static char io_mem_used[IO_MEM_NB_ENTRIES];
204
static int io_mem_watch;
205
#endif
206

    
207
/* log support */
208
#ifdef WIN32
209
static const char *logfilename = "qemu.log";
210
#else
211
static const char *logfilename = "/tmp/qemu.log";
212
#endif
213
FILE *logfile;
214
int loglevel;
215
static int log_append = 0;
216

    
217
/* statistics */
218
#if !defined(CONFIG_USER_ONLY)
219
static int tlb_flush_count;
220
#endif
221
static int tb_flush_count;
222
static int tb_phys_invalidate_count;
223

    
224
#ifdef _WIN32
225
static void map_exec(void *addr, long size)
226
{
227
    DWORD old_protect;
228
    VirtualProtect(addr, size,
229
                   PAGE_EXECUTE_READWRITE, &old_protect);
230
    
231
}
232
#else
233
static void map_exec(void *addr, long size)
234
{
235
    unsigned long start, end, page_size;
236
    
237
    page_size = getpagesize();
238
    start = (unsigned long)addr;
239
    start &= ~(page_size - 1);
240
    
241
    end = (unsigned long)addr + size;
242
    end += page_size - 1;
243
    end &= ~(page_size - 1);
244
    
245
    mprotect((void *)start, end - start,
246
             PROT_READ | PROT_WRITE | PROT_EXEC);
247
}
248
#endif
249

    
250
static void page_init(void)
251
{
252
    /* NOTE: we can always suppose that qemu_host_page_size >=
253
       TARGET_PAGE_SIZE */
254
#ifdef _WIN32
255
    {
256
        SYSTEM_INFO system_info;
257

    
258
        GetSystemInfo(&system_info);
259
        qemu_real_host_page_size = system_info.dwPageSize;
260
    }
261
#else
262
    qemu_real_host_page_size = getpagesize();
263
#endif
264
    if (qemu_host_page_size == 0)
265
        qemu_host_page_size = qemu_real_host_page_size;
266
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
267
        qemu_host_page_size = TARGET_PAGE_SIZE;
268
    qemu_host_page_bits = 0;
269
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
270
        qemu_host_page_bits++;
271
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
272

    
273
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
274
    {
275
#ifdef HAVE_KINFO_GETVMMAP
276
        struct kinfo_vmentry *freep;
277
        int i, cnt;
278

    
279
        freep = kinfo_getvmmap(getpid(), &cnt);
280
        if (freep) {
281
            mmap_lock();
282
            for (i = 0; i < cnt; i++) {
283
                unsigned long startaddr, endaddr;
284

    
285
                startaddr = freep[i].kve_start;
286
                endaddr = freep[i].kve_end;
287
                if (h2g_valid(startaddr)) {
288
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
289

    
290
                    if (h2g_valid(endaddr)) {
291
                        endaddr = h2g(endaddr);
292
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
293
                    } else {
294
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
295
                        endaddr = ~0ul;
296
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
297
#endif
298
                    }
299
                }
300
            }
301
            free(freep);
302
            mmap_unlock();
303
        }
304
#else
305
        FILE *f;
306

    
307
        last_brk = (unsigned long)sbrk(0);
308

    
309
        f = fopen("/compat/linux/proc/self/maps", "r");
310
        if (f) {
311
            mmap_lock();
312

    
313
            do {
314
                unsigned long startaddr, endaddr;
315
                int n;
316

    
317
                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);
318

    
319
                if (n == 2 && h2g_valid(startaddr)) {
320
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;
321

    
322
                    if (h2g_valid(endaddr)) {
323
                        endaddr = h2g(endaddr);
324
                    } else {
325
                        endaddr = ~0ul;
326
                    }
327
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
328
                }
329
            } while (!feof(f));
330

    
331
            fclose(f);
332
            mmap_unlock();
333
        }
334
#endif
335
    }
336
#endif
337
}
338

    
339
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
340
{
341
    PageDesc *pd;
342
    void **lp;
343
    int i;
344

    
345
#if defined(CONFIG_USER_ONLY)
346
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
347
# define ALLOC(P, SIZE)                                 \
348
    do {                                                \
349
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
350
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
351
    } while (0)
352
#else
353
# define ALLOC(P, SIZE) \
354
    do { P = qemu_mallocz(SIZE); } while (0)
355
#endif
356

    
357
    /* Level 1.  Always allocated.  */
358
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
359

    
360
    /* Level 2..N-1.  */
361
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
362
        void **p = *lp;
363

    
364
        if (p == NULL) {
365
            if (!alloc) {
366
                return NULL;
367
            }
368
            ALLOC(p, sizeof(void *) * L2_SIZE);
369
            *lp = p;
370
        }
371

    
372
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
373
    }
374

    
375
    pd = *lp;
376
    if (pd == NULL) {
377
        if (!alloc) {
378
            return NULL;
379
        }
380
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
381
        *lp = pd;
382
    }
383

    
384
#undef ALLOC
385

    
386
    return pd + (index & (L2_SIZE - 1));
387
}
388

    
389
static inline PageDesc *page_find(tb_page_addr_t index)
390
{
391
    return page_find_alloc(index, 0);
392
}
393

    
394
#if !defined(CONFIG_USER_ONLY)
395
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
396
{
397
    PhysPageDesc *pd;
398
    void **lp;
399
    int i;
400

    
401
    /* Level 1.  Always allocated.  */
402
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));
403

    
404
    /* Level 2..N-1.  */
405
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
406
        void **p = *lp;
407
        if (p == NULL) {
408
            if (!alloc) {
409
                return NULL;
410
            }
411
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
412
        }
413
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
414
    }
415

    
416
    pd = *lp;
417
    if (pd == NULL) {
418
        int i;
419

    
420
        if (!alloc) {
421
            return NULL;
422
        }
423

    
424
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);
425

    
426
        for (i = 0; i < L2_SIZE; i++) {
427
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
428
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
429
        }
430
    }
431

    
432
    return pd + (index & (L2_SIZE - 1));
433
}
434

    
435
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
436
{
437
    return phys_page_find_alloc(index, 0);
438
}
439

    
440
static void tlb_protect_code(ram_addr_t ram_addr);
441
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
442
                                    target_ulong vaddr);
443
#define mmap_lock() do { } while(0)
444
#define mmap_unlock() do { } while(0)
445
#endif
446

    
447
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
448

    
449
#if defined(CONFIG_USER_ONLY)
450
/* Currently it is not recommended to allocate big chunks of data in
451
   user mode. It will change when a dedicated libc will be used */
452
#define USE_STATIC_CODE_GEN_BUFFER
453
#endif
454

    
455
#ifdef USE_STATIC_CODE_GEN_BUFFER
456
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
457
               __attribute__((aligned (CODE_GEN_ALIGN)));
458
#endif
459

    
460
static void code_gen_alloc(unsigned long tb_size)
461
{
462
#ifdef USE_STATIC_CODE_GEN_BUFFER
463
    code_gen_buffer = static_code_gen_buffer;
464
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
465
    map_exec(code_gen_buffer, code_gen_buffer_size);
466
#else
467
    code_gen_buffer_size = tb_size;
468
    if (code_gen_buffer_size == 0) {
469
#if defined(CONFIG_USER_ONLY)
470
        /* in user mode, phys_ram_size is not meaningful */
471
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
472
#else
473
        /* XXX: needs adjustments */
474
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
475
#endif
476
    }
477
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
478
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
479
    /* The code gen buffer location may have constraints depending on
480
       the host cpu and OS */
481
#if defined(__linux__) 
482
    {
483
        int flags;
484
        void *start = NULL;
485

    
486
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
487
#if defined(__x86_64__)
488
        flags |= MAP_32BIT;
489
        /* Cannot map more than that */
490
        if (code_gen_buffer_size > (800 * 1024 * 1024))
491
            code_gen_buffer_size = (800 * 1024 * 1024);
492
#elif defined(__sparc_v9__)
493
        // Map the buffer below 2G, so we can use direct calls and branches
494
        flags |= MAP_FIXED;
495
        start = (void *) 0x60000000UL;
496
        if (code_gen_buffer_size > (512 * 1024 * 1024))
497
            code_gen_buffer_size = (512 * 1024 * 1024);
498
#elif defined(__arm__)
499
        /* Map the buffer below 32M, so we can use direct calls and branches */
500
        flags |= MAP_FIXED;
501
        start = (void *) 0x01000000UL;
502
        if (code_gen_buffer_size > 16 * 1024 * 1024)
503
            code_gen_buffer_size = 16 * 1024 * 1024;
504
#elif defined(__s390x__)
505
        /* Map the buffer so that we can use direct calls and branches.  */
506
        /* We have a +- 4GB range on the branches; leave some slop.  */
507
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
508
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
509
        }
510
        start = (void *)0x90000000UL;
511
#endif
512
        code_gen_buffer = mmap(start, code_gen_buffer_size,
513
                               PROT_WRITE | PROT_READ | PROT_EXEC,
514
                               flags, -1, 0);
515
        if (code_gen_buffer == MAP_FAILED) {
516
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
517
            exit(1);
518
        }
519
    }
520
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
521
    {
522
        int flags;
523
        void *addr = NULL;
524
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
525
#if defined(__x86_64__)
526
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
527
         * 0x40000000 is free */
528
        flags |= MAP_FIXED;
529
        addr = (void *)0x40000000;
530
        /* Cannot map more than that */
531
        if (code_gen_buffer_size > (800 * 1024 * 1024))
532
            code_gen_buffer_size = (800 * 1024 * 1024);
533
#endif
534
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
535
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
536
                               flags, -1, 0);
537
        if (code_gen_buffer == MAP_FAILED) {
538
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
539
            exit(1);
540
        }
541
    }
542
#else
543
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
544
    map_exec(code_gen_buffer, code_gen_buffer_size);
545
#endif
546
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
547
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
548
    code_gen_buffer_max_size = code_gen_buffer_size - 
549
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
550
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
551
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
552
}
553

    
554
/* Must be called before using the QEMU cpus. 'tb_size' is the size
555
   (in bytes) allocated to the translation buffer. Zero means default
556
   size. */
557
void cpu_exec_init_all(unsigned long tb_size)
558
{
559
    cpu_gen_init();
560
    code_gen_alloc(tb_size);
561
    code_gen_ptr = code_gen_buffer;
562
    page_init();
563
#if !defined(CONFIG_USER_ONLY)
564
    io_mem_init();
565
#endif
566
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
567
    /* There's no guest base to take into account, so go ahead and
568
       initialize the prologue now.  */
569
    tcg_prologue_init(&tcg_ctx);
570
#endif
571
}
572

    
573
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
574

    
575
static int cpu_common_post_load(void *opaque, int version_id)
576
{
577
    CPUState *env = opaque;
578

    
579
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
580
       version_id is increased. */
581
    env->interrupt_request &= ~0x01;
582
    tlb_flush(env, 1);
583

    
584
    return 0;
585
}
586

    
587
static const VMStateDescription vmstate_cpu_common = {
588
    .name = "cpu_common",
589
    .version_id = 1,
590
    .minimum_version_id = 1,
591
    .minimum_version_id_old = 1,
592
    .post_load = cpu_common_post_load,
593
    .fields      = (VMStateField []) {
594
        VMSTATE_UINT32(halted, CPUState),
595
        VMSTATE_UINT32(interrupt_request, CPUState),
596
        VMSTATE_END_OF_LIST()
597
    }
598
};
599
#endif
600

    
601
CPUState *qemu_get_cpu(int cpu)
602
{
603
    CPUState *env = first_cpu;
604

    
605
    while (env) {
606
        if (env->cpu_index == cpu)
607
            break;
608
        env = env->next_cpu;
609
    }
610

    
611
    return env;
612
}
613

    
614
void cpu_exec_init(CPUState *env)
615
{
616
    CPUState **penv;
617
    int cpu_index;
618

    
619
#if defined(CONFIG_USER_ONLY)
620
    cpu_list_lock();
621
#endif
622
    env->next_cpu = NULL;
623
    penv = &first_cpu;
624
    cpu_index = 0;
625
    while (*penv != NULL) {
626
        penv = &(*penv)->next_cpu;
627
        cpu_index++;
628
    }
629
    env->cpu_index = cpu_index;
630
    env->numa_node = 0;
631
    QTAILQ_INIT(&env->breakpoints);
632
    QTAILQ_INIT(&env->watchpoints);
633
    *penv = env;
634
#if defined(CONFIG_USER_ONLY)
635
    cpu_list_unlock();
636
#endif
637
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
638
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
639
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
640
                    cpu_save, cpu_load, env);
641
#endif
642
}
643

    
644
static inline void invalidate_page_bitmap(PageDesc *p)
645
{
646
    if (p->code_bitmap) {
647
        qemu_free(p->code_bitmap);
648
        p->code_bitmap = NULL;
649
    }
650
    p->code_write_count = 0;
651
}
652

    
653
/* Set to NULL all the 'first_tb' fields in all PageDescs. */
654

    
655
static void page_flush_tb_1 (int level, void **lp)
656
{
657
    int i;
658

    
659
    if (*lp == NULL) {
660
        return;
661
    }
662
    if (level == 0) {
663
        PageDesc *pd = *lp;
664
        for (i = 0; i < L2_SIZE; ++i) {
665
            pd[i].first_tb = NULL;
666
            invalidate_page_bitmap(pd + i);
667
        }
668
    } else {
669
        void **pp = *lp;
670
        for (i = 0; i < L2_SIZE; ++i) {
671
            page_flush_tb_1 (level - 1, pp + i);
672
        }
673
    }
674
}
675

    
676
static void page_flush_tb(void)
677
{
678
    int i;
679
    for (i = 0; i < V_L1_SIZE; i++) {
680
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
681
    }
682
}
683

    
684
/* flush all the translation blocks */
685
/* XXX: tb_flush is currently not thread safe */
686
void tb_flush(CPUState *env1)
687
{
688
    CPUState *env;
689
#if defined(DEBUG_FLUSH)
690
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
691
           (unsigned long)(code_gen_ptr - code_gen_buffer),
692
           nb_tbs, nb_tbs > 0 ?
693
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
694
#endif
695
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
696
        cpu_abort(env1, "Internal error: code buffer overflow\n");
697

    
698
    nb_tbs = 0;
699

    
700
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
701
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
702
    }
703

    
704
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
705
    page_flush_tb();
706

    
707
    code_gen_ptr = code_gen_buffer;
708
    /* XXX: flush processor icache at this point if cache flush is
709
       expensive */
710
    tb_flush_count++;
711
}
712

    
713
#ifdef DEBUG_TB_CHECK
714

    
715
static void tb_invalidate_check(target_ulong address)
716
{
717
    TranslationBlock *tb;
718
    int i;
719
    address &= TARGET_PAGE_MASK;
720
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
721
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
722
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
723
                  address >= tb->pc + tb->size)) {
724
                printf("ERROR invalidate: address=" TARGET_FMT_lx
725
                       " PC=%08lx size=%04x\n",
726
                       address, (long)tb->pc, tb->size);
727
            }
728
        }
729
    }
730
}
731

    
732
/* verify that all the pages have correct rights for code */
733
static void tb_page_check(void)
734
{
735
    TranslationBlock *tb;
736
    int i, flags1, flags2;
737

    
738
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
739
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
740
            flags1 = page_get_flags(tb->pc);
741
            flags2 = page_get_flags(tb->pc + tb->size - 1);
742
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
743
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
744
                       (long)tb->pc, tb->size, flags1, flags2);
745
            }
746
        }
747
    }
748
}
749

    
750
#endif
751

    
752
/* invalidate one TB */
753
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
754
                             int next_offset)
755
{
756
    TranslationBlock *tb1;
757
    for(;;) {
758
        tb1 = *ptb;
759
        if (tb1 == tb) {
760
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
761
            break;
762
        }
763
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
764
    }
765
}
766

    
767
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
768
{
769
    TranslationBlock *tb1;
770
    unsigned int n1;
771

    
772
    for(;;) {
773
        tb1 = *ptb;
774
        n1 = (long)tb1 & 3;
775
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
776
        if (tb1 == tb) {
777
            *ptb = tb1->page_next[n1];
778
            break;
779
        }
780
        ptb = &tb1->page_next[n1];
781
    }
782
}
783

    
784
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
785
{
786
    TranslationBlock *tb1, **ptb;
787
    unsigned int n1;
788

    
789
    ptb = &tb->jmp_next[n];
790
    tb1 = *ptb;
791
    if (tb1) {
792
        /* find tb(n) in circular list */
793
        for(;;) {
794
            tb1 = *ptb;
795
            n1 = (long)tb1 & 3;
796
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
797
            if (n1 == n && tb1 == tb)
798
                break;
799
            if (n1 == 2) {
800
                ptb = &tb1->jmp_first;
801
            } else {
802
                ptb = &tb1->jmp_next[n1];
803
            }
804
        }
805
        /* now we can suppress tb(n) from the list */
806
        *ptb = tb->jmp_next[n];
807

    
808
        tb->jmp_next[n] = NULL;
809
    }
810
}
811

    
812
/* reset the jump entry 'n' of a TB so that it is not chained to
813
   another TB */
814
static inline void tb_reset_jump(TranslationBlock *tb, int n)
815
{
816
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
817
}
818

    
819
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
820
{
821
    CPUState *env;
822
    PageDesc *p;
823
    unsigned int h, n1;
824
    tb_page_addr_t phys_pc;
825
    TranslationBlock *tb1, *tb2;
826

    
827
    /* remove the TB from the hash list */
828
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
829
    h = tb_phys_hash_func(phys_pc);
830
    tb_remove(&tb_phys_hash[h], tb,
831
              offsetof(TranslationBlock, phys_hash_next));
832

    
833
    /* remove the TB from the page list */
834
    if (tb->page_addr[0] != page_addr) {
835
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
836
        tb_page_remove(&p->first_tb, tb);
837
        invalidate_page_bitmap(p);
838
    }
839
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
840
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
841
        tb_page_remove(&p->first_tb, tb);
842
        invalidate_page_bitmap(p);
843
    }
844

    
845
    tb_invalidated_flag = 1;
846

    
847
    /* remove the TB from the hash list */
848
    h = tb_jmp_cache_hash_func(tb->pc);
849
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
850
        if (env->tb_jmp_cache[h] == tb)
851
            env->tb_jmp_cache[h] = NULL;
852
    }
853

    
854
    /* suppress this TB from the two jump lists */
855
    tb_jmp_remove(tb, 0);
856
    tb_jmp_remove(tb, 1);
857

    
858
    /* suppress any remaining jumps to this TB */
859
    tb1 = tb->jmp_first;
860
    for(;;) {
861
        n1 = (long)tb1 & 3;
862
        if (n1 == 2)
863
            break;
864
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
865
        tb2 = tb1->jmp_next[n1];
866
        tb_reset_jump(tb1, n1);
867
        tb1->jmp_next[n1] = NULL;
868
        tb1 = tb2;
869
    }
870
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
871

    
872
    tb_phys_invalidate_count++;
873
}
874

    
875
static inline void set_bits(uint8_t *tab, int start, int len)
876
{
877
    int end, mask, end1;
878

    
879
    end = start + len;
880
    tab += start >> 3;
881
    mask = 0xff << (start & 7);
882
    if ((start & ~7) == (end & ~7)) {
883
        if (start < end) {
884
            mask &= ~(0xff << (end & 7));
885
            *tab |= mask;
886
        }
887
    } else {
888
        *tab++ |= mask;
889
        start = (start + 8) & ~7;
890
        end1 = end & ~7;
891
        while (start < end1) {
892
            *tab++ = 0xff;
893
            start += 8;
894
        }
895
        if (start < end) {
896
            mask = ~(0xff << (end & 7));
897
            *tab |= mask;
898
        }
899
    }
900
}
901

    
902
static void build_page_bitmap(PageDesc *p)
903
{
904
    int n, tb_start, tb_end;
905
    TranslationBlock *tb;
906

    
907
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
908

    
909
    tb = p->first_tb;
910
    while (tb != NULL) {
911
        n = (long)tb & 3;
912
        tb = (TranslationBlock *)((long)tb & ~3);
913
        /* NOTE: this is subtle as a TB may span two physical pages */
914
        if (n == 0) {
915
            /* NOTE: tb_end may be after the end of the page, but
916
               it is not a problem */
917
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
918
            tb_end = tb_start + tb->size;
919
            if (tb_end > TARGET_PAGE_SIZE)
920
                tb_end = TARGET_PAGE_SIZE;
921
        } else {
922
            tb_start = 0;
923
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
924
        }
925
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
926
        tb = tb->page_next[n];
927
    }
928
}
929

    
930
TranslationBlock *tb_gen_code(CPUState *env,
931
                              target_ulong pc, target_ulong cs_base,
932
                              int flags, int cflags)
933
{
934
    TranslationBlock *tb;
935
    uint8_t *tc_ptr;
936
    tb_page_addr_t phys_pc, phys_page2;
937
    target_ulong virt_page2;
938
    int code_gen_size;
939

    
940
    phys_pc = get_page_addr_code(env, pc);
941
    tb = tb_alloc(pc);
942
    if (!tb) {
943
        /* flush must be done */
944
        tb_flush(env);
945
        /* cannot fail at this point */
946
        tb = tb_alloc(pc);
947
        /* Don't forget to invalidate previous TB info.  */
948
        tb_invalidated_flag = 1;
949
    }
950
    tc_ptr = code_gen_ptr;
951
    tb->tc_ptr = tc_ptr;
952
    tb->cs_base = cs_base;
953
    tb->flags = flags;
954
    tb->cflags = cflags;
955
    cpu_gen_code(env, tb, &code_gen_size);
956
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
957

    
958
    /* check next page if needed */
959
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
960
    phys_page2 = -1;
961
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
962
        phys_page2 = get_page_addr_code(env, virt_page2);
963
    }
964
    tb_link_page(tb, phys_pc, phys_page2);
965
    return tb;
966
}
967

    
968
/* invalidate all TBs which intersect with the target physical page
969
   starting in range [start;end[. NOTE: start and end must refer to
970
   the same physical page. 'is_cpu_write_access' should be true if called
971
   from a real cpu write access: the virtual CPU will exit the current
972
   TB if code is modified inside this TB. */
973
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
974
                                   int is_cpu_write_access)
975
{
976
    TranslationBlock *tb, *tb_next, *saved_tb;
977
    CPUState *env = cpu_single_env;
978
    tb_page_addr_t tb_start, tb_end;
979
    PageDesc *p;
980
    int n;
981
#ifdef TARGET_HAS_PRECISE_SMC
982
    int current_tb_not_found = is_cpu_write_access;
983
    TranslationBlock *current_tb = NULL;
984
    int current_tb_modified = 0;
985
    target_ulong current_pc = 0;
986
    target_ulong current_cs_base = 0;
987
    int current_flags = 0;
988
#endif /* TARGET_HAS_PRECISE_SMC */
989

    
990
    p = page_find(start >> TARGET_PAGE_BITS);
991
    if (!p)
992
        return;
993
    if (!p->code_bitmap &&
994
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
995
        is_cpu_write_access) {
996
        /* build code bitmap */
997
        build_page_bitmap(p);
998
    }
999

    
1000
    /* we remove all the TBs in the range [start, end[ */
1001
    /* XXX: see if in some cases it could be faster to invalidate all the code */
1002
    tb = p->first_tb;
1003
    while (tb != NULL) {
1004
        n = (long)tb & 3;
1005
        tb = (TranslationBlock *)((long)tb & ~3);
1006
        tb_next = tb->page_next[n];
1007
        /* NOTE: this is subtle as a TB may span two physical pages */
1008
        if (n == 0) {
1009
            /* NOTE: tb_end may be after the end of the page, but
1010
               it is not a problem */
1011
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1012
            tb_end = tb_start + tb->size;
1013
        } else {
1014
            tb_start = tb->page_addr[1];
1015
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1016
        }
1017
        if (!(tb_end <= start || tb_start >= end)) {
1018
#ifdef TARGET_HAS_PRECISE_SMC
1019
            if (current_tb_not_found) {
1020
                current_tb_not_found = 0;
1021
                current_tb = NULL;
1022
                if (env->mem_io_pc) {
1023
                    /* now we have a real cpu fault */
1024
                    current_tb = tb_find_pc(env->mem_io_pc);
1025
                }
1026
            }
1027
            if (current_tb == tb &&
1028
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
1029
                /* If we are modifying the current TB, we must stop
1030
                its execution. We could be more precise by checking
1031
                that the modification is after the current PC, but it
1032
                would require a specialized function to partially
1033
                restore the CPU state */
1034

    
1035
                current_tb_modified = 1;
1036
                cpu_restore_state(current_tb, env,
1037
                                  env->mem_io_pc, NULL);
1038
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1039
                                     &current_flags);
1040
            }
1041
#endif /* TARGET_HAS_PRECISE_SMC */
1042
            /* we need to do that to handle the case where a signal
1043
               occurs while doing tb_phys_invalidate() */
1044
            saved_tb = NULL;
1045
            if (env) {
1046
                saved_tb = env->current_tb;
1047
                env->current_tb = NULL;
1048
            }
1049
            tb_phys_invalidate(tb, -1);
1050
            if (env) {
1051
                env->current_tb = saved_tb;
1052
                if (env->interrupt_request && env->current_tb)
1053
                    cpu_interrupt(env, env->interrupt_request);
1054
            }
1055
        }
1056
        tb = tb_next;
1057
    }
1058
#if !defined(CONFIG_USER_ONLY)
1059
    /* if no code remaining, no need to continue to use slow writes */
1060
    if (!p->first_tb) {
1061
        invalidate_page_bitmap(p);
1062
        if (is_cpu_write_access) {
1063
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1064
        }
1065
    }
1066
#endif
1067
#ifdef TARGET_HAS_PRECISE_SMC
1068
    if (current_tb_modified) {
1069
        /* we generate a block containing just the instruction
1070
           modifying the memory. It will ensure that it cannot modify
1071
           itself */
1072
        env->current_tb = NULL;
1073
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1074
        cpu_resume_from_signal(env, NULL);
1075
    }
1076
#endif
1077
}
1078

    
1079
/* len must be <= 8 and start must be a multiple of len */
1080
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
1081
{
1082
    PageDesc *p;
1083
    int offset, b;
1084
#if 0
1085
    if (1) {
1086
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1087
                  cpu_single_env->mem_io_vaddr, len,
1088
                  cpu_single_env->eip,
1089
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1090
    }
1091
#endif
1092
    p = page_find(start >> TARGET_PAGE_BITS);
1093
    if (!p)
1094
        return;
1095
    if (p->code_bitmap) {
1096
        offset = start & ~TARGET_PAGE_MASK;
1097
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1098
        if (b & ((1 << len) - 1))
1099
            goto do_invalidate;
1100
    } else {
1101
    do_invalidate:
1102
        tb_invalidate_phys_page_range(start, start + len, 1);
1103
    }
1104
}
1105

    
1106
#if !defined(CONFIG_SOFTMMU)
1107
static void tb_invalidate_phys_page(tb_page_addr_t addr,
1108
                                    unsigned long pc, void *puc)
1109
{
1110
    TranslationBlock *tb;
1111
    PageDesc *p;
1112
    int n;
1113
#ifdef TARGET_HAS_PRECISE_SMC
1114
    TranslationBlock *current_tb = NULL;
1115
    CPUState *env = cpu_single_env;
1116
    int current_tb_modified = 0;
1117
    target_ulong current_pc = 0;
1118
    target_ulong current_cs_base = 0;
1119
    int current_flags = 0;
1120
#endif
1121

    
1122
    addr &= TARGET_PAGE_MASK;
1123
    p = page_find(addr >> TARGET_PAGE_BITS);
1124
    if (!p)
1125
        return;
1126
    tb = p->first_tb;
1127
#ifdef TARGET_HAS_PRECISE_SMC
1128
    if (tb && pc != 0) {
1129
        current_tb = tb_find_pc(pc);
1130
    }
1131
#endif
1132
    while (tb != NULL) {
1133
        n = (long)tb & 3;
1134
        tb = (TranslationBlock *)((long)tb & ~3);
1135
#ifdef TARGET_HAS_PRECISE_SMC
1136
        if (current_tb == tb &&
1137
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1138
                /* If we are modifying the current TB, we must stop
1139
                   its execution. We could be more precise by checking
1140
                   that the modification is after the current PC, but it
1141
                   would require a specialized function to partially
1142
                   restore the CPU state */
1143

    
1144
            current_tb_modified = 1;
1145
            cpu_restore_state(current_tb, env, pc, puc);
1146
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1147
                                 &current_flags);
1148
        }
1149
#endif /* TARGET_HAS_PRECISE_SMC */
1150
        tb_phys_invalidate(tb, addr);
1151
        tb = tb->page_next[n];
1152
    }
1153
    p->first_tb = NULL;
1154
#ifdef TARGET_HAS_PRECISE_SMC
1155
    if (current_tb_modified) {
1156
        /* we generate a block containing just the instruction
1157
           modifying the memory. It will ensure that it cannot modify
1158
           itself */
1159
        env->current_tb = NULL;
1160
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1161
        cpu_resume_from_signal(env, puc);
1162
    }
1163
#endif
1164
}
1165
#endif
1166

    
1167
/* add the tb in the target page and protect it if necessary */
1168
static inline void tb_alloc_page(TranslationBlock *tb,
1169
                                 unsigned int n, tb_page_addr_t page_addr)
1170
{
1171
    PageDesc *p;
1172
    TranslationBlock *last_first_tb;
1173

    
1174
    tb->page_addr[n] = page_addr;
1175
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
1176
    tb->page_next[n] = p->first_tb;
1177
    last_first_tb = p->first_tb;
1178
    p->first_tb = (TranslationBlock *)((long)tb | n);
1179
    invalidate_page_bitmap(p);
1180

    
1181
#if defined(TARGET_HAS_SMC) || 1
1182

    
1183
#if defined(CONFIG_USER_ONLY)
1184
    if (p->flags & PAGE_WRITE) {
1185
        target_ulong addr;
1186
        PageDesc *p2;
1187
        int prot;
1188

    
1189
        /* force the host page as non writable (writes will have a
1190
           page fault + mprotect overhead) */
1191
        page_addr &= qemu_host_page_mask;
1192
        prot = 0;
1193
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1194
            addr += TARGET_PAGE_SIZE) {
1195

    
1196
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1197
            if (!p2)
1198
                continue;
1199
            prot |= p2->flags;
1200
            p2->flags &= ~PAGE_WRITE;
1201
          }
1202
        mprotect(g2h(page_addr), qemu_host_page_size,
1203
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1204
#ifdef DEBUG_TB_INVALIDATE
1205
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1206
               page_addr);
1207
#endif
1208
    }
1209
#else
1210
    /* if some code is already present, then the pages are already
1211
       protected. So we handle the case where only the first TB is
1212
       allocated in a physical page */
1213
    if (!last_first_tb) {
1214
        tlb_protect_code(page_addr);
1215
    }
1216
#endif
1217

    
1218
#endif /* TARGET_HAS_SMC */
1219
}
1220

    
1221
/* Allocate a new translation block. Flush the translation buffer if
1222
   too many translation blocks or too much generated code. */
1223
TranslationBlock *tb_alloc(target_ulong pc)
1224
{
1225
    TranslationBlock *tb;
1226

    
1227
    if (nb_tbs >= code_gen_max_blocks ||
1228
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1229
        return NULL;
1230
    tb = &tbs[nb_tbs++];
1231
    tb->pc = pc;
1232
    tb->cflags = 0;
1233
    return tb;
1234
}
1235

    
1236
void tb_free(TranslationBlock *tb)
1237
{
1238
    /* In practice this is mostly used for single use temporary TB
1239
       Ignore the hard cases and just back up if this TB happens to
1240
       be the last one generated.  */
1241
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1242
        code_gen_ptr = tb->tc_ptr;
1243
        nb_tbs--;
1244
    }
1245
}
1246

    
1247
/* add a new TB and link it to the physical page tables. phys_page2 is
1248
   (-1) to indicate that only one page contains the TB. */
1249
void tb_link_page(TranslationBlock *tb,
1250
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
1251
{
1252
    unsigned int h;
1253
    TranslationBlock **ptb;
1254

    
1255
    /* Grab the mmap lock to stop another thread invalidating this TB
1256
       before we are done.  */
1257
    mmap_lock();
1258
    /* add in the physical hash table */
1259
    h = tb_phys_hash_func(phys_pc);
1260
    ptb = &tb_phys_hash[h];
1261
    tb->phys_hash_next = *ptb;
1262
    *ptb = tb;
1263

    
1264
    /* add in the page list */
1265
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1266
    if (phys_page2 != -1)
1267
        tb_alloc_page(tb, 1, phys_page2);
1268
    else
1269
        tb->page_addr[1] = -1;
1270

    
1271
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1272
    tb->jmp_next[0] = NULL;
1273
    tb->jmp_next[1] = NULL;
1274

    
1275
    /* init original jump addresses */
1276
    if (tb->tb_next_offset[0] != 0xffff)
1277
        tb_reset_jump(tb, 0);
1278
    if (tb->tb_next_offset[1] != 0xffff)
1279
        tb_reset_jump(tb, 1);
1280

    
1281
#ifdef DEBUG_TB_CHECK
1282
    tb_page_check();
1283
#endif
1284
    mmap_unlock();
1285
}
1286

    
1287
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1288
   tb[1].tc_ptr. Return NULL if not found */
1289
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1290
{
1291
    int m_min, m_max, m;
1292
    unsigned long v;
1293
    TranslationBlock *tb;
1294

    
1295
    if (nb_tbs <= 0)
1296
        return NULL;
1297
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1298
        tc_ptr >= (unsigned long)code_gen_ptr)
1299
        return NULL;
1300
    /* binary search (cf Knuth) */
1301
    m_min = 0;
1302
    m_max = nb_tbs - 1;
1303
    while (m_min <= m_max) {
1304
        m = (m_min + m_max) >> 1;
1305
        tb = &tbs[m];
1306
        v = (unsigned long)tb->tc_ptr;
1307
        if (v == tc_ptr)
1308
            return tb;
1309
        else if (tc_ptr < v) {
1310
            m_max = m - 1;
1311
        } else {
1312
            m_min = m + 1;
1313
        }
1314
    }
1315
    return &tbs[m_max];
1316
}
1317

    
1318
static void tb_reset_jump_recursive(TranslationBlock *tb);
1319

    
1320
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1321
{
1322
    TranslationBlock *tb1, *tb_next, **ptb;
1323
    unsigned int n1;
1324

    
1325
    tb1 = tb->jmp_next[n];
1326
    if (tb1 != NULL) {
1327
        /* find head of list */
1328
        for(;;) {
1329
            n1 = (long)tb1 & 3;
1330
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1331
            if (n1 == 2)
1332
                break;
1333
            tb1 = tb1->jmp_next[n1];
1334
        }
1335
        /* we are now sure now that tb jumps to tb1 */
1336
        tb_next = tb1;
1337

    
1338
        /* remove tb from the jmp_first list */
1339
        ptb = &tb_next->jmp_first;
1340
        for(;;) {
1341
            tb1 = *ptb;
1342
            n1 = (long)tb1 & 3;
1343
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1344
            if (n1 == n && tb1 == tb)
1345
                break;
1346
            ptb = &tb1->jmp_next[n1];
1347
        }
1348
        *ptb = tb->jmp_next[n];
1349
        tb->jmp_next[n] = NULL;
1350

    
1351
        /* suppress the jump to next tb in generated code */
1352
        tb_reset_jump(tb, n);
1353

    
1354
        /* suppress jumps in the tb on which we could have jumped */
1355
        tb_reset_jump_recursive(tb_next);
1356
    }
1357
}
1358

    
1359
static void tb_reset_jump_recursive(TranslationBlock *tb)
1360
{
1361
    tb_reset_jump_recursive2(tb, 0);
1362
    tb_reset_jump_recursive2(tb, 1);
1363
}
1364

    
1365
#if defined(TARGET_HAS_ICE)
1366
#if defined(CONFIG_USER_ONLY)
1367
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1368
{
1369
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
1370
}
1371
#else
1372
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1373
{
1374
    target_phys_addr_t addr;
1375
    target_ulong pd;
1376
    ram_addr_t ram_addr;
1377
    PhysPageDesc *p;
1378

    
1379
    addr = cpu_get_phys_page_debug(env, pc);
1380
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1381
    if (!p) {
1382
        pd = IO_MEM_UNASSIGNED;
1383
    } else {
1384
        pd = p->phys_offset;
1385
    }
1386
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1387
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1388
}
1389
#endif
1390
#endif /* TARGET_HAS_ICE */
1391

    
1392
#if defined(CONFIG_USER_ONLY)
1393
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1394

    
1395
{
1396
}
1397

    
1398
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1399
                          int flags, CPUWatchpoint **watchpoint)
1400
{
1401
    return -ENOSYS;
1402
}
1403
#else
1404
/* Add a watchpoint.  */
1405
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1406
                          int flags, CPUWatchpoint **watchpoint)
1407
{
1408
    target_ulong len_mask = ~(len - 1);
1409
    CPUWatchpoint *wp;
1410

    
1411
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1412
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1413
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1414
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1415
        return -EINVAL;
1416
    }
1417
    wp = qemu_malloc(sizeof(*wp));
1418

    
1419
    wp->vaddr = addr;
1420
    wp->len_mask = len_mask;
1421
    wp->flags = flags;
1422

    
1423
    /* keep all GDB-injected watchpoints in front */
1424
    if (flags & BP_GDB)
1425
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1426
    else
1427
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1428

    
1429
    tlb_flush_page(env, addr);
1430

    
1431
    if (watchpoint)
1432
        *watchpoint = wp;
1433
    return 0;
1434
}
1435

    
1436
/* Remove a specific watchpoint.  */
1437
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1438
                          int flags)
1439
{
1440
    target_ulong len_mask = ~(len - 1);
1441
    CPUWatchpoint *wp;
1442

    
1443
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1444
        if (addr == wp->vaddr && len_mask == wp->len_mask
1445
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1446
            cpu_watchpoint_remove_by_ref(env, wp);
1447
            return 0;
1448
        }
1449
    }
1450
    return -ENOENT;
1451
}
1452

    
1453
/* Remove a specific watchpoint by reference.  */
1454
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1455
{
1456
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1457

    
1458
    tlb_flush_page(env, watchpoint->vaddr);
1459

    
1460
    qemu_free(watchpoint);
1461
}
1462

    
1463
/* Remove all matching watchpoints.  */
1464
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1465
{
1466
    CPUWatchpoint *wp, *next;
1467

    
1468
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1469
        if (wp->flags & mask)
1470
            cpu_watchpoint_remove_by_ref(env, wp);
1471
    }
1472
}
1473
#endif
1474

    
1475
/* Add a breakpoint.  */
1476
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1477
                          CPUBreakpoint **breakpoint)
1478
{
1479
#if defined(TARGET_HAS_ICE)
1480
    CPUBreakpoint *bp;
1481

    
1482
    bp = qemu_malloc(sizeof(*bp));
1483

    
1484
    bp->pc = pc;
1485
    bp->flags = flags;
1486

    
1487
    /* keep all GDB-injected breakpoints in front */
1488
    if (flags & BP_GDB)
1489
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1490
    else
1491
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1492

    
1493
    breakpoint_invalidate(env, pc);
1494

    
1495
    if (breakpoint)
1496
        *breakpoint = bp;
1497
    return 0;
1498
#else
1499
    return -ENOSYS;
1500
#endif
1501
}
1502

    
1503
/* Remove a specific breakpoint.  */
1504
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1505
{
1506
#if defined(TARGET_HAS_ICE)
1507
    CPUBreakpoint *bp;
1508

    
1509
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1510
        if (bp->pc == pc && bp->flags == flags) {
1511
            cpu_breakpoint_remove_by_ref(env, bp);
1512
            return 0;
1513
        }
1514
    }
1515
    return -ENOENT;
1516
#else
1517
    return -ENOSYS;
1518
#endif
1519
}
1520

    
1521
/* Remove a specific breakpoint by reference.  */
1522
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1523
{
1524
#if defined(TARGET_HAS_ICE)
1525
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1526

    
1527
    breakpoint_invalidate(env, breakpoint->pc);
1528

    
1529
    qemu_free(breakpoint);
1530
#endif
1531
}
1532

    
1533
/* Remove all matching breakpoints. */
1534
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1535
{
1536
#if defined(TARGET_HAS_ICE)
1537
    CPUBreakpoint *bp, *next;
1538

    
1539
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1540
        if (bp->flags & mask)
1541
            cpu_breakpoint_remove_by_ref(env, bp);
1542
    }
1543
#endif
1544
}
1545

    
1546
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1547
   CPU loop after each instruction */
1548
void cpu_single_step(CPUState *env, int enabled)
1549
{
1550
#if defined(TARGET_HAS_ICE)
1551
    if (env->singlestep_enabled != enabled) {
1552
        env->singlestep_enabled = enabled;
1553
        if (kvm_enabled())
1554
            kvm_update_guest_debug(env, 0);
1555
        else {
1556
            /* must flush all the translated code to avoid inconsistencies */
1557
            /* XXX: only flush what is necessary */
1558
            tb_flush(env);
1559
        }
1560
    }
1561
#endif
1562
}
1563

    
1564
/* enable or disable low levels log */
1565
void cpu_set_log(int log_flags)
1566
{
1567
    loglevel = log_flags;
1568
    if (loglevel && !logfile) {
1569
        logfile = fopen(logfilename, log_append ? "a" : "w");
1570
        if (!logfile) {
1571
            perror(logfilename);
1572
            _exit(1);
1573
        }
1574
#if !defined(CONFIG_SOFTMMU)
1575
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1576
        {
1577
            static char logfile_buf[4096];
1578
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1579
        }
1580
#elif !defined(_WIN32)
1581
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1582
        setvbuf(logfile, NULL, _IOLBF, 0);
1583
#endif
1584
        log_append = 1;
1585
    }
1586
    if (!loglevel && logfile) {
1587
        fclose(logfile);
1588
        logfile = NULL;
1589
    }
1590
}
1591

    
1592
void cpu_set_log_filename(const char *filename)
1593
{
1594
    logfilename = strdup(filename);
1595
    if (logfile) {
1596
        fclose(logfile);
1597
        logfile = NULL;
1598
    }
1599
    cpu_set_log(loglevel);
1600
}
1601

    
1602
static void cpu_unlink_tb(CPUState *env)
1603
{
1604
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1605
       problem and hope the cpu will stop of its own accord.  For userspace
1606
       emulation this often isn't actually as bad as it sounds.  Often
1607
       signals are used primarily to interrupt blocking syscalls.  */
1608
    TranslationBlock *tb;
1609
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1610

    
1611
    spin_lock(&interrupt_lock);
1612
    tb = env->current_tb;
1613
    /* if the cpu is currently executing code, we must unlink it and
1614
       all the potentially executing TB */
1615
    if (tb) {
1616
        env->current_tb = NULL;
1617
        tb_reset_jump_recursive(tb);
1618
    }
1619
    spin_unlock(&interrupt_lock);
1620
}
1621

    
1622
/* mask must never be zero, except for A20 change call */
1623
void cpu_interrupt(CPUState *env, int mask)
1624
{
1625
    int old_mask;
1626

    
1627
    old_mask = env->interrupt_request;
1628
    env->interrupt_request |= mask;
1629

    
1630
#ifndef CONFIG_USER_ONLY
1631
    /*
1632
     * If called from iothread context, wake the target cpu in
1633
     * case its halted.
1634
     */
1635
    if (!qemu_cpu_self(env)) {
1636
        qemu_cpu_kick(env);
1637
        return;
1638
    }
1639
#endif
1640

    
1641
    if (use_icount) {
1642
        env->icount_decr.u16.high = 0xffff;
1643
#ifndef CONFIG_USER_ONLY
1644
        if (!can_do_io(env)
1645
            && (mask & ~old_mask) != 0) {
1646
            cpu_abort(env, "Raised interrupt while not in I/O function");
1647
        }
1648
#endif
1649
    } else {
1650
        cpu_unlink_tb(env);
1651
    }
1652
}
1653

    
1654
void cpu_reset_interrupt(CPUState *env, int mask)
1655
{
1656
    env->interrupt_request &= ~mask;
1657
}
1658

    
1659
void cpu_exit(CPUState *env)
1660
{
1661
    env->exit_request = 1;
1662
    cpu_unlink_tb(env);
1663
}
1664

    
1665
const CPULogItem cpu_log_items[] = {
1666
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1667
      "show generated host assembly code for each compiled TB" },
1668
    { CPU_LOG_TB_IN_ASM, "in_asm",
1669
      "show target assembly code for each compiled TB" },
1670
    { CPU_LOG_TB_OP, "op",
1671
      "show micro ops for each compiled TB" },
1672
    { CPU_LOG_TB_OP_OPT, "op_opt",
1673
      "show micro ops "
1674
#ifdef TARGET_I386
1675
      "before eflags optimization and "
1676
#endif
1677
      "after liveness analysis" },
1678
    { CPU_LOG_INT, "int",
1679
      "show interrupts/exceptions in short format" },
1680
    { CPU_LOG_EXEC, "exec",
1681
      "show trace before each executed TB (lots of logs)" },
1682
    { CPU_LOG_TB_CPU, "cpu",
1683
      "show CPU state before block translation" },
1684
#ifdef TARGET_I386
1685
    { CPU_LOG_PCALL, "pcall",
1686
      "show protected mode far calls/returns/exceptions" },
1687
    { CPU_LOG_RESET, "cpu_reset",
1688
      "show CPU state before CPU resets" },
1689
#endif
1690
#ifdef DEBUG_IOPORT
1691
    { CPU_LOG_IOPORT, "ioport",
1692
      "show all i/o ports accesses" },
1693
#endif
1694
    { 0, NULL, NULL },
1695
};
1696

    
1697
#ifndef CONFIG_USER_ONLY
1698
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1699
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1700

    
1701
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1702
                                  ram_addr_t size,
1703
                                  ram_addr_t phys_offset)
1704
{
1705
    CPUPhysMemoryClient *client;
1706
    QLIST_FOREACH(client, &memory_client_list, list) {
1707
        client->set_memory(client, start_addr, size, phys_offset);
1708
    }
1709
}
1710

    
1711
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1712
                                        target_phys_addr_t end)
1713
{
1714
    CPUPhysMemoryClient *client;
1715
    QLIST_FOREACH(client, &memory_client_list, list) {
1716
        int r = client->sync_dirty_bitmap(client, start, end);
1717
        if (r < 0)
1718
            return r;
1719
    }
1720
    return 0;
1721
}
1722

    
1723
static int cpu_notify_migration_log(int enable)
1724
{
1725
    CPUPhysMemoryClient *client;
1726
    QLIST_FOREACH(client, &memory_client_list, list) {
1727
        int r = client->migration_log(client, enable);
1728
        if (r < 0)
1729
            return r;
1730
    }
1731
    return 0;
1732
}
1733

    
1734
static void phys_page_for_each_1(CPUPhysMemoryClient *client,
1735
                                 int level, void **lp)
1736
{
1737
    int i;
1738

    
1739
    if (*lp == NULL) {
1740
        return;
1741
    }
1742
    if (level == 0) {
1743
        PhysPageDesc *pd = *lp;
1744
        for (i = 0; i < L2_SIZE; ++i) {
1745
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
1746
                client->set_memory(client, pd[i].region_offset,
1747
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
1748
            }
1749
        }
1750
    } else {
1751
        void **pp = *lp;
1752
        for (i = 0; i < L2_SIZE; ++i) {
1753
            phys_page_for_each_1(client, level - 1, pp + i);
1754
        }
1755
    }
1756
}
1757

    
1758
static void phys_page_for_each(CPUPhysMemoryClient *client)
1759
{
1760
    int i;
1761
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
1765
}
1766

    
1767
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1768
{
1769
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
1770
    phys_page_for_each(client);
1771
}
1772
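
/* Minimal client sketch (illustrative only; the callback names are the
 * ones invoked by the notifier helpers above):
 *
 *     static CPUPhysMemoryClient my_client = {
 *         .set_memory        = my_set_memory,
 *         .sync_dirty_bitmap = my_sync_dirty_bitmap,
 *         .migration_log     = my_migration_log,
 *     };
 *     cpu_register_phys_memory_client(&my_client);
 *
 * Registration immediately replays every currently mapped physical page
 * through set_memory via phys_page_for_each().
 */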

    
1773
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1774
{
1775
    QLIST_REMOVE(client, list);
1776
}
1777
#endif
1778

    
1779
static int cmp1(const char *s1, int n, const char *s2)
1780
{
1781
    if (strlen(s2) != n)
1782
        return 0;
1783
    return memcmp(s1, s2, n) == 0;
1784
}
1785

    
1786
/* takes a comma separated list of log masks. Return 0 if error. */
1787
int cpu_str_to_log_mask(const char *str)
1788
{
1789
    const CPULogItem *item;
1790
    int mask;
1791
    const char *p, *p1;
1792

    
1793
    p = str;
1794
    mask = 0;
1795
    for(;;) {
1796
        p1 = strchr(p, ',');
1797
        if (!p1)
1798
            p1 = p + strlen(p);
1799
        if(cmp1(p,p1-p,"all")) {
1800
            for(item = cpu_log_items; item->mask != 0; item++) {
1801
                mask |= item->mask;
1802
            }
1803
        } else {
1804
            for(item = cpu_log_items; item->mask != 0; item++) {
1805
                if (cmp1(p, p1 - p, item->name))
1806
                    goto found;
1807
            }
1808
            return 0;
1809
        }
1810
    found:
1811
        mask |= item->mask;
1812
        if (*p1 != ',')
1813
            break;
1814
        p = p1 + 1;
1815
    }
1816
    return mask;
1817
}
1818

    
1819
void cpu_abort(CPUState *env, const char *fmt, ...)
1820
{
1821
    va_list ap;
1822
    va_list ap2;
1823

    
1824
    va_start(ap, fmt);
1825
    va_copy(ap2, ap);
1826
    fprintf(stderr, "qemu: fatal: ");
1827
    vfprintf(stderr, fmt, ap);
1828
    fprintf(stderr, "\n");
1829
#ifdef TARGET_I386
1830
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1831
#else
1832
    cpu_dump_state(env, stderr, fprintf, 0);
1833
#endif
1834
    if (qemu_log_enabled()) {
1835
        qemu_log("qemu: fatal: ");
1836
        qemu_log_vprintf(fmt, ap2);
1837
        qemu_log("\n");
1838
#ifdef TARGET_I386
1839
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1840
#else
1841
        log_cpu_state(env, 0);
1842
#endif
1843
        qemu_log_flush();
1844
        qemu_log_close();
1845
    }
1846
    va_end(ap2);
1847
    va_end(ap);
1848
#if defined(CONFIG_USER_ONLY)
1849
    {
1850
        struct sigaction act;
1851
        sigfillset(&act.sa_mask);
1852
        act.sa_handler = SIG_DFL;
1853
        sigaction(SIGABRT, &act, NULL);
1854
    }
1855
#endif
1856
    abort();
1857
}
1858

    
1859
CPUState *cpu_copy(CPUState *env)
1860
{
1861
    CPUState *new_env = cpu_init(env->cpu_model_str);
1862
    CPUState *next_cpu = new_env->next_cpu;
1863
    int cpu_index = new_env->cpu_index;
1864
#if defined(TARGET_HAS_ICE)
1865
    CPUBreakpoint *bp;
1866
    CPUWatchpoint *wp;
1867
#endif
1868

    
1869
    memcpy(new_env, env, sizeof(CPUState));
1870

    
1871
    /* Preserve chaining and index. */
1872
    new_env->next_cpu = next_cpu;
1873
    new_env->cpu_index = cpu_index;
1874

    
1875
    /* Clone all break/watchpoints.
1876
       Note: Once we support ptrace with hw-debug register access, make sure
1877
       BP_CPU break/watchpoints are handled correctly on clone. */
1878
    QTAILQ_INIT(&env->breakpoints);
1879
    QTAILQ_INIT(&env->watchpoints);
1880
#if defined(TARGET_HAS_ICE)
1881
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1882
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1883
    }
1884
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1885
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1886
                              wp->flags, NULL);
1887
    }
1888
#endif
1889

    
1890
    return new_env;
1891
}
1892

    
1893
#if !defined(CONFIG_USER_ONLY)
1894

    
1895
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1896
{
1897
    unsigned int i;
1898

    
1899
    /* Discard jump cache entries for any tb which might potentially
1900
       overlap the flushed page.  */
1901
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1902
    memset (&env->tb_jmp_cache[i], 0, 
1903
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1904

    
1905
    i = tb_jmp_cache_hash_page(addr);
1906
    memset (&env->tb_jmp_cache[i], 0, 
1907
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1908
}
1909

    
1910
static CPUTLBEntry s_cputlb_empty_entry = {
1911
    .addr_read  = -1,
1912
    .addr_write = -1,
1913
    .addr_code  = -1,
1914
    .addend     = -1,
1915
};
1916

    
1917
/* NOTE: if flush_global is true, also flush global entries (not
1918
   implemented yet) */
1919
void tlb_flush(CPUState *env, int flush_global)
1920
{
1921
    int i;
1922

    
1923
#if defined(DEBUG_TLB)
1924
    printf("tlb_flush:\n");
1925
#endif
1926
    /* must reset current TB so that interrupts cannot modify the
1927
       links while we are modifying them */
1928
    env->current_tb = NULL;
1929

    
1930
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1931
        int mmu_idx;
1932
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1933
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1934
        }
1935
    }
1936

    
1937
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1938

    
1939
    env->tlb_flush_addr = -1;
1940
    env->tlb_flush_mask = 0;
1941
    tlb_flush_count++;
1942
}
1943

    
1944
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1945
{
1946
    if (addr == (tlb_entry->addr_read &
1947
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1948
        addr == (tlb_entry->addr_write &
1949
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1950
        addr == (tlb_entry->addr_code &
1951
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1952
        *tlb_entry = s_cputlb_empty_entry;
1953
    }
1954
}
1955

    
1956
void tlb_flush_page(CPUState *env, target_ulong addr)
1957
{
1958
    int i;
1959
    int mmu_idx;
1960

    
1961
#if defined(DEBUG_TLB)
1962
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1963
#endif
1964
    /* Check if we need to flush due to large pages.  */
1965
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
1966
#if defined(DEBUG_TLB)
1967
        printf("tlb_flush_page: forced full flush ("
1968
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
1969
               env->tlb_flush_addr, env->tlb_flush_mask);
1970
#endif
1971
        tlb_flush(env, 1);
1972
        return;
1973
    }
1974
    /* must reset current TB so that interrupts cannot modify the
1975
       links while we are modifying them */
1976
    env->current_tb = NULL;
1977

    
1978
    addr &= TARGET_PAGE_MASK;
1979
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1980
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1981
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1982

    
1983
    tlb_flush_jmp_cache(env, addr);
1984
}
1985

    
1986
/* update the TLBs so that writes to code in the virtual page 'addr'
1987
   can be detected */
1988
static void tlb_protect_code(ram_addr_t ram_addr)
1989
{
1990
    cpu_physical_memory_reset_dirty(ram_addr,
1991
                                    ram_addr + TARGET_PAGE_SIZE,
1992
                                    CODE_DIRTY_FLAG);
1993
}
1994

    
1995
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1996
   tested for self modifying code */
1997
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1998
                                    target_ulong vaddr)
1999
{
2000
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
2001
}
2002

    
2003
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
2004
                                         unsigned long start, unsigned long length)
2005
{
2006
    unsigned long addr;
2007
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2008
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
2009
        if ((addr - start) < length) {
2010
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
2011
        }
2012
    }
2013
}
2014

    
2015
/* Note: start and end must be within the same ram block.  */
2016
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
2017
                                     int dirty_flags)
2018
{
2019
    CPUState *env;
2020
    unsigned long length, start1;
2021
    int i;
2022

    
2023
    start &= TARGET_PAGE_MASK;
2024
    end = TARGET_PAGE_ALIGN(end);
2025

    
2026
    length = end - start;
2027
    if (length == 0)
2028
        return;
2029
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
2030

    
2031
    /* we modify the TLB cache so that the dirty bit will be set again
2032
       when accessing the range */
2033
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
2034
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
2036
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
2037
            != (end - 1) - start) {
2038
        abort();
2039
    }
2040

    
2041
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2042
        int mmu_idx;
2043
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2044
            for(i = 0; i < CPU_TLB_SIZE; i++)
2045
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
2046
                                      start1, length);
2047
        }
2048
    }
2049
}
2050
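
/* Descriptive note: clearing dirty bits re-arms write protection in the
 * TLB - tlb_reset_dirty_range() above tags matching entries with
 * TLB_NOTDIRTY, so the next guest write to the range is routed through the
 * notdirty_mem handlers defined later in this file, which set the dirty
 * flags again. */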

    
2051
int cpu_physical_memory_set_dirty_tracking(int enable)
2052
{
2053
    int ret = 0;
2054
    in_migration = enable;
2055
    ret = cpu_notify_migration_log(!!enable);
2056
    return ret;
2057
}
2058

    
2059
int cpu_physical_memory_get_dirty_tracking(void)
2060
{
2061
    return in_migration;
2062
}
2063

    
2064
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2065
                                   target_phys_addr_t end_addr)
2066
{
2067
    int ret;
2068

    
2069
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2070
    return ret;
2071
}
2072

    
2073
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2074
{
2075
    ram_addr_t ram_addr;
2076
    void *p;
2077

    
2078
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2079
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2080
            + tlb_entry->addend);
2081
        ram_addr = qemu_ram_addr_from_host_nofail(p);
2082
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
2083
            tlb_entry->addr_write |= TLB_NOTDIRTY;
2084
        }
2085
    }
2086
}
2087

    
2088
/* update the TLB according to the current state of the dirty bits */
2089
void cpu_tlb_update_dirty(CPUState *env)
2090
{
2091
    int i;
2092
    int mmu_idx;
2093
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2094
        for(i = 0; i < CPU_TLB_SIZE; i++)
2095
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2096
    }
2097
}
2098

    
2099
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2100
{
2101
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2102
        tlb_entry->addr_write = vaddr;
2103
}
2104

    
2105
/* update the TLB corresponding to virtual page vaddr
2106
   so that it is no longer dirty */
2107
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2108
{
2109
    int i;
2110
    int mmu_idx;
2111

    
2112
    vaddr &= TARGET_PAGE_MASK;
2113
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2114
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2115
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2116
}
2117

    
2118
/* Our TLB does not support large pages, so remember the area covered by
2119
   large pages and trigger a full TLB flush if these are invalidated.  */
2120
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
2121
                               target_ulong size)
2122
{
2123
    target_ulong mask = ~(size - 1);
2124

    
2125
    if (env->tlb_flush_addr == (target_ulong)-1) {
2126
        env->tlb_flush_addr = vaddr & mask;
2127
        env->tlb_flush_mask = mask;
2128
        return;
2129
    }
2130
    /* Extend the existing region to include the new page.
2131
       This is a compromise between unnecessary flushes and the cost
2132
       of maintaining a full variable size TLB.  */
2133
    mask &= env->tlb_flush_mask;
2134
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
2135
        mask <<= 1;
2136
    }
2137
    env->tlb_flush_addr &= mask;
2138
    env->tlb_flush_mask = mask;
2139
}
2140
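
/* Worked example (added for illustration): with two 2 MB pages mapped at
 * vaddr 0x200000 and 0x600000, the loop above widens the mask from
 * ~0x1fffff to ~0x7fffff, so tlb_flush_addr/tlb_flush_mask end up covering
 * the single 8 MB region [0, 0x800000) rather than tracking both pages
 * separately. */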

    
2141
/* Add a new TLB entry. At most one entry for a given virtual address
2142
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2143
   supplied size is only used by tlb_flush_page.  */
2144
void tlb_set_page(CPUState *env, target_ulong vaddr,
2145
                  target_phys_addr_t paddr, int prot,
2146
                  int mmu_idx, target_ulong size)
2147
{
2148
    PhysPageDesc *p;
2149
    unsigned long pd;
2150
    unsigned int index;
2151
    target_ulong address;
2152
    target_ulong code_address;
2153
    unsigned long addend;
2154
    CPUTLBEntry *te;
2155
    CPUWatchpoint *wp;
2156
    target_phys_addr_t iotlb;
2157

    
2158
    assert(size >= TARGET_PAGE_SIZE);
2159
    if (size != TARGET_PAGE_SIZE) {
2160
        tlb_add_large_page(env, vaddr, size);
2161
    }
2162
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2163
    if (!p) {
2164
        pd = IO_MEM_UNASSIGNED;
2165
    } else {
2166
        pd = p->phys_offset;
2167
    }
2168
#if defined(DEBUG_TLB)
2169
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2170
           " prot=%x idx=%d pd=0x%08lx\n",
2171
           vaddr, paddr, prot, mmu_idx, pd);
2172
#endif
2173

    
2174
    address = vaddr;
2175
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2176
        /* IO memory case (romd handled later) */
2177
        address |= TLB_MMIO;
2178
    }
2179
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2180
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2181
        /* Normal RAM.  */
2182
        iotlb = pd & TARGET_PAGE_MASK;
2183
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2184
            iotlb |= IO_MEM_NOTDIRTY;
2185
        else
2186
            iotlb |= IO_MEM_ROM;
2187
    } else {
2188
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
2194
        iotlb = (pd & ~TARGET_PAGE_MASK);
2195
        if (p) {
2196
            iotlb += p->region_offset;
2197
        } else {
2198
            iotlb += paddr;
2199
        }
2200
    }
2201

    
2202
    code_address = address;
2203
    /* Make accesses to pages with watchpoints go via the
2204
       watchpoint trap routines.  */
2205
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2206
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2207
            /* Avoid trapping reads of pages with a write breakpoint. */
2208
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2209
                iotlb = io_mem_watch + paddr;
2210
                address |= TLB_MMIO;
2211
                break;
2212
            }
2213
        }
2214
    }
2215

    
2216
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2217
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2218
    te = &env->tlb_table[mmu_idx][index];
2219
    te->addend = addend - vaddr;
2220
    if (prot & PAGE_READ) {
2221
        te->addr_read = address;
2222
    } else {
2223
        te->addr_read = -1;
2224
    }
2225

    
2226
    if (prot & PAGE_EXEC) {
2227
        te->addr_code = code_address;
2228
    } else {
2229
        te->addr_code = -1;
2230
    }
2231
    if (prot & PAGE_WRITE) {
2232
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2233
            (pd & IO_MEM_ROMD)) {
2234
            /* Write access calls the I/O callback.  */
2235
            te->addr_write = address | TLB_MMIO;
2236
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2237
                   !cpu_physical_memory_is_dirty(pd)) {
2238
            te->addr_write = address | TLB_NOTDIRTY;
2239
        } else {
2240
            te->addr_write = address;
2241
        }
2242
    } else {
2243
        te->addr_write = -1;
2244
    }
2245
}
2246
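
/* Summary (descriptive only): the TLB entry written above encodes one of
 * three cases in its low bits - TLB_MMIO for I/O or watchpointed pages
 * (forcing the slow path), TLB_NOTDIRTY for writes to clean RAM (so dirty
 * tracking and self-modifying-code detection see the store), or no flag at
 * all for ordinary dirty RAM, which is accessed directly via te->addend. */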

    
2247
#else
2248

    
2249
void tlb_flush(CPUState *env, int flush_global)
2250
{
2251
}
2252

    
2253
void tlb_flush_page(CPUState *env, target_ulong addr)
2254
{
2255
}
2256

    
2257
/*
2258
 * Walks guest process memory "regions" one by one
2259
 * and calls callback function 'fn' for each region.
2260
 */
2261

    
2262
struct walk_memory_regions_data
2263
{
2264
    walk_memory_regions_fn fn;
2265
    void *priv;
2266
    unsigned long start;
2267
    int prot;
2268
};
2269

    
2270
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2271
                                   abi_ulong end, int new_prot)
2272
{
2273
    if (data->start != -1ul) {
2274
        int rc = data->fn(data->priv, data->start, end, data->prot);
2275
        if (rc != 0) {
2276
            return rc;
2277
        }
2278
    }
2279

    
2280
    data->start = (new_prot ? end : -1ul);
2281
    data->prot = new_prot;
2282

    
2283
    return 0;
2284
}
2285

    
2286
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2287
                                 abi_ulong base, int level, void **lp)
2288
{
2289
    abi_ulong pa;
2290
    int i, rc;
2291

    
2292
    if (*lp == NULL) {
2293
        return walk_memory_regions_end(data, base, 0);
2294
    }
2295

    
2296
    if (level == 0) {
2297
        PageDesc *pd = *lp;
2298
        for (i = 0; i < L2_SIZE; ++i) {
2299
            int prot = pd[i].flags;
2300

    
2301
            pa = base | (i << TARGET_PAGE_BITS);
2302
            if (prot != data->prot) {
2303
                rc = walk_memory_regions_end(data, pa, prot);
2304
                if (rc != 0) {
2305
                    return rc;
2306
                }
2307
            }
2308
        }
2309
    } else {
2310
        void **pp = *lp;
2311
        for (i = 0; i < L2_SIZE; ++i) {
2312
            pa = base | ((abi_ulong)i <<
2313
                (TARGET_PAGE_BITS + L2_BITS * level));
2314
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2315
            if (rc != 0) {
2316
                return rc;
2317
            }
2318
        }
2319
    }
2320

    
2321
    return 0;
2322
}
2323

    
2324
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2325
{
2326
    struct walk_memory_regions_data data;
2327
    unsigned long i;
2328

    
2329
    data.fn = fn;
2330
    data.priv = priv;
2331
    data.start = -1ul;
2332
    data.prot = 0;
2333

    
2334
    for (i = 0; i < V_L1_SIZE; i++) {
2335
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2336
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2337
        if (rc != 0) {
2338
            return rc;
2339
        }
2340
    }
2341

    
2342
    return walk_memory_regions_end(&data, 0, 0);
2343
}
2344

    
2345
static int dump_region(void *priv, abi_ulong start,
2346
    abi_ulong end, unsigned long prot)
2347
{
2348
    FILE *f = (FILE *)priv;
2349

    
2350
    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2351
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
2352
        start, end, end - start,
2353
        ((prot & PAGE_READ) ? 'r' : '-'),
2354
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2355
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2356

    
2357
    return (0);
2358
}
2359

    
2360
/* dump memory mappings */
2361
void page_dump(FILE *f)
2362
{
2363
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2364
            "start", "end", "size", "prot");
2365
    walk_memory_regions(f, dump_region);
2366
}
2367

    
2368
int page_get_flags(target_ulong address)
2369
{
2370
    PageDesc *p;
2371

    
2372
    p = page_find(address >> TARGET_PAGE_BITS);
2373
    if (!p)
2374
        return 0;
2375
    return p->flags;
2376
}
2377

    
2378
/* Modify the flags of a page and invalidate the code if necessary.
2379
   The flag PAGE_WRITE_ORG is positioned automatically depending
2380
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2381
void page_set_flags(target_ulong start, target_ulong end, int flags)
2382
{
2383
    target_ulong addr, len;
2384

    
2385
    /* This function should never be called with addresses outside the
2386
       guest address space.  If this assert fires, it probably indicates
2387
       a missing call to h2g_valid.  */
2388
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2389
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2390
#endif
2391
    assert(start < end);
2392

    
2393
    start = start & TARGET_PAGE_MASK;
2394
    end = TARGET_PAGE_ALIGN(end);
2395

    
2396
    if (flags & PAGE_WRITE) {
2397
        flags |= PAGE_WRITE_ORG;
2398
    }
2399

    
2400
    for (addr = start, len = end - start;
2401
         len != 0;
2402
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2403
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2404

    
2405
        /* If the write protection bit is set, then we invalidate
2406
           the code inside.  */
2407
        if (!(p->flags & PAGE_WRITE) &&
2408
            (flags & PAGE_WRITE) &&
2409
            p->first_tb) {
2410
            tb_invalidate_phys_page(addr, 0, NULL);
2411
        }
2412
        p->flags = flags;
2413
    }
2414
}
2415

    
2416
int page_check_range(target_ulong start, target_ulong len, int flags)
2417
{
2418
    PageDesc *p;
2419
    target_ulong end;
2420
    target_ulong addr;
2421

    
2422
    /* This function should never be called with addresses outside the
2423
       guest address space.  If this assert fires, it probably indicates
2424
       a missing call to h2g_valid.  */
2425
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2426
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2427
#endif
2428

    
2429
    if (len == 0) {
2430
        return 0;
2431
    }
2432
    if (start + len - 1 < start) {
2433
        /* We've wrapped around.  */
2434
        return -1;
2435
    }
2436

    
2437
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2438
    start = start & TARGET_PAGE_MASK;
2439

    
2440
    for (addr = start, len = end - start;
2441
         len != 0;
2442
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2443
        p = page_find(addr >> TARGET_PAGE_BITS);
2444
        if( !p )
2445
            return -1;
2446
        if( !(p->flags & PAGE_VALID) )
2447
            return -1;
2448

    
2449
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2450
            return -1;
2451
        if (flags & PAGE_WRITE) {
2452
            if (!(p->flags & PAGE_WRITE_ORG))
2453
                return -1;
2454
            /* unprotect the page if it was put read-only because it
2455
               contains translated code */
2456
            if (!(p->flags & PAGE_WRITE)) {
2457
                if (!page_unprotect(addr, 0, NULL))
2458
                    return -1;
2459
            }
2460
            return 0;
2461
        }
2462
    }
2463
    return 0;
2464
}
2465
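
/* Illustrative use (not from the original file): guest syscall emulation
 * typically validates a user buffer before touching it, e.g.
 *
 *     if (page_check_range(guest_addr, len, PAGE_WRITE) != 0) {
 *         return -TARGET_EFAULT;
 *     }
 *
 * TARGET_EFAULT is the linux-user errno constant and is assumed here only
 * for the sake of the example.
 */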

    
2466
/* called from signal handler: invalidate the code and unprotect the
2467
   page. Return TRUE if the fault was successfully handled. */
2468
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2469
{
2470
    unsigned int prot;
2471
    PageDesc *p;
2472
    target_ulong host_start, host_end, addr;
2473

    
2474
    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
2477
    mmap_lock();
2478

    
2479
    p = page_find(address >> TARGET_PAGE_BITS);
2480
    if (!p) {
2481
        mmap_unlock();
2482
        return 0;
2483
    }
2484

    
2485
    /* if the page was really writable, then we change its
2486
       protection back to writable */
2487
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2488
        host_start = address & qemu_host_page_mask;
2489
        host_end = host_start + qemu_host_page_size;
2490

    
2491
        prot = 0;
2492
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2493
            p = page_find(addr >> TARGET_PAGE_BITS);
2494
            p->flags |= PAGE_WRITE;
2495
            prot |= p->flags;
2496

    
2497
            /* and since the content will be modified, we must invalidate
2498
               the corresponding translated code. */
2499
            tb_invalidate_phys_page(addr, pc, puc);
2500
#ifdef DEBUG_TB_CHECK
2501
            tb_invalidate_check(addr);
2502
#endif
2503
        }
2504
        mprotect((void *)g2h(host_start), qemu_host_page_size,
2505
                 prot & PAGE_BITS);
2506

    
2507
        mmap_unlock();
2508
        return 1;
2509
    }
2510
    mmap_unlock();
2511
    return 0;
2512
}
2513

    
2514
static inline void tlb_set_dirty(CPUState *env,
2515
                                 unsigned long addr, target_ulong vaddr)
2516
{
2517
}
2518
#endif /* defined(CONFIG_USER_ONLY) */
2519

    
2520
#if !defined(CONFIG_USER_ONLY)
2521

    
2522
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2523
typedef struct subpage_t {
2524
    target_phys_addr_t base;
2525
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2526
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
2527
} subpage_t;
2528

    
2529
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2530
                             ram_addr_t memory, ram_addr_t region_offset);
2531
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2532
                                ram_addr_t orig_memory,
2533
                                ram_addr_t region_offset);
2534
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2535
                      need_subpage)                                     \
2536
    do {                                                                \
2537
        if (addr > start_addr)                                          \
2538
            start_addr2 = 0;                                            \
2539
        else {                                                          \
2540
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2541
            if (start_addr2 > 0)                                        \
2542
                need_subpage = 1;                                       \
2543
        }                                                               \
2544
                                                                        \
2545
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2546
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2547
        else {                                                          \
2548
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2549
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2550
                need_subpage = 1;                                       \
2551
        }                                                               \
2552
    } while (0)
2553

    
2554
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
2562
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2563
                                         ram_addr_t size,
2564
                                         ram_addr_t phys_offset,
2565
                                         ram_addr_t region_offset)
2566
{
2567
    target_phys_addr_t addr, end_addr;
2568
    PhysPageDesc *p;
2569
    CPUState *env;
2570
    ram_addr_t orig_size = size;
2571
    subpage_t *subpage;
2572

    
2573
    cpu_notify_set_memory(start_addr, size, phys_offset);
2574

    
2575
    if (phys_offset == IO_MEM_UNASSIGNED) {
2576
        region_offset = start_addr;
2577
    }
2578
    region_offset &= TARGET_PAGE_MASK;
2579
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2580
    end_addr = start_addr + (target_phys_addr_t)size;
2581
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2582
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2583
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2584
            ram_addr_t orig_memory = p->phys_offset;
2585
            target_phys_addr_t start_addr2, end_addr2;
2586
            int need_subpage = 0;
2587

    
2588
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2589
                          need_subpage);
2590
            if (need_subpage) {
2591
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2592
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2593
                                           &p->phys_offset, orig_memory,
2594
                                           p->region_offset);
2595
                } else {
2596
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2597
                                            >> IO_MEM_SHIFT];
2598
                }
2599
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2600
                                 region_offset);
2601
                p->region_offset = 0;
2602
            } else {
2603
                p->phys_offset = phys_offset;
2604
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2605
                    (phys_offset & IO_MEM_ROMD))
2606
                    phys_offset += TARGET_PAGE_SIZE;
2607
            }
2608
        } else {
2609
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2610
            p->phys_offset = phys_offset;
2611
            p->region_offset = region_offset;
2612
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2613
                (phys_offset & IO_MEM_ROMD)) {
2614
                phys_offset += TARGET_PAGE_SIZE;
2615
            } else {
2616
                target_phys_addr_t start_addr2, end_addr2;
2617
                int need_subpage = 0;
2618

    
2619
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2620
                              end_addr2, need_subpage);
2621

    
2622
                if (need_subpage) {
2623
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2624
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2625
                                           addr & TARGET_PAGE_MASK);
2626
                    subpage_register(subpage, start_addr2, end_addr2,
2627
                                     phys_offset, region_offset);
2628
                    p->region_offset = 0;
2629
                }
2630
            }
2631
        }
2632
        region_offset += TARGET_PAGE_SIZE;
2633
    }
2634

    
2635
    /* since each CPU stores ram addresses in its TLB cache, we must
2636
       reset the modified entries */
2637
    /* XXX: slow ! */
2638
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2639
        tlb_flush(env, 1);
2640
    }
2641
}
2642
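
/* Illustrative example (not part of the original file): a board model
 * mapping 64 KB of freshly allocated RAM at guest physical address 0
 * might do
 *
 *     ram_addr_t off = qemu_ram_alloc(NULL, "board.ram", 0x10000);
 *     cpu_register_physical_memory_offset(0x00000000, 0x10000,
 *                                         off | IO_MEM_RAM, 0);
 *
 * qemu_ram_alloc() is defined later in this file; IO_MEM_RAM in the low
 * bits marks the range as ordinary RAM for the TLB code above.
 */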

    
2643
/* XXX: temporary until new memory mapping API */
2644
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2645
{
2646
    PhysPageDesc *p;
2647

    
2648
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2649
    if (!p)
2650
        return IO_MEM_UNASSIGNED;
2651
    return p->phys_offset;
2652
}
2653

    
2654
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2655
{
2656
    if (kvm_enabled())
2657
        kvm_coalesce_mmio_region(addr, size);
2658
}
2659

    
2660
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2661
{
2662
    if (kvm_enabled())
2663
        kvm_uncoalesce_mmio_region(addr, size);
2664
}
2665

    
2666
void qemu_flush_coalesced_mmio_buffer(void)
2667
{
2668
    if (kvm_enabled())
2669
        kvm_flush_coalesced_mmio_buffer();
2670
}
2671

    
2672
#if defined(__linux__) && !defined(TARGET_S390X)
2673

    
2674
#include <sys/vfs.h>
2675

    
2676
#define HUGETLBFS_MAGIC       0x958458f6
2677

    
2678
static long gethugepagesize(const char *path)
2679
{
2680
    struct statfs fs;
2681
    int ret;
2682

    
2683
    do {
2684
        ret = statfs(path, &fs);
2685
    } while (ret != 0 && errno == EINTR);
2686

    
2687
    if (ret != 0) {
2688
        perror(path);
2689
        return 0;
2690
    }
2691

    
2692
    if (fs.f_type != HUGETLBFS_MAGIC)
2693
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2694

    
2695
    return fs.f_bsize;
2696
}
2697

    
2698
static void *file_ram_alloc(RAMBlock *block,
2699
                            ram_addr_t memory,
2700
                            const char *path)
2701
{
2702
    char *filename;
2703
    void *area;
2704
    int fd;
2705
#ifdef MAP_POPULATE
2706
    int flags;
2707
#endif
2708
    unsigned long hpagesize;
2709

    
2710
    hpagesize = gethugepagesize(path);
2711
    if (!hpagesize) {
2712
        return NULL;
2713
    }
2714

    
2715
    if (memory < hpagesize) {
2716
        return NULL;
2717
    }
2718

    
2719
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
2720
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2721
        return NULL;
2722
    }
2723

    
2724
    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2725
        return NULL;
2726
    }
2727

    
2728
    fd = mkstemp(filename);
2729
    if (fd < 0) {
2730
        perror("unable to create backing store for hugepages");
2731
        free(filename);
2732
        return NULL;
2733
    }
2734
    unlink(filename);
2735
    free(filename);
2736

    
2737
    memory = (memory+hpagesize-1) & ~(hpagesize-1);
2738

    
2739
    /*
2740
     * ftruncate is not supported by hugetlbfs in older
2741
     * hosts, so don't bother bailing out on errors.
2742
     * If anything goes wrong with it under other filesystems,
2743
     * mmap will fail.
2744
     */
2745
    if (ftruncate(fd, memory))
2746
        perror("ftruncate");
2747

    
2748
#ifdef MAP_POPULATE
2749
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2750
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2751
     * to sidestep this quirk.
2752
     */
2753
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2754
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2755
#else
2756
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2757
#endif
2758
    if (area == MAP_FAILED) {
2759
        perror("file_ram_alloc: can't mmap RAM pages");
2760
        close(fd);
2761
        return (NULL);
2762
    }
2763
    block->fd = fd;
2764
    return area;
2765
}
2766
#endif
2767

    
2768
static ram_addr_t find_ram_offset(ram_addr_t size)
2769
{
2770
    RAMBlock *block, *next_block;
2771
    ram_addr_t offset = 0, mingap = ULONG_MAX;
2772

    
2773
    if (QLIST_EMPTY(&ram_list.blocks))
2774
        return 0;
2775

    
2776
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2777
        ram_addr_t end, next = ULONG_MAX;
2778

    
2779
        end = block->offset + block->length;
2780

    
2781
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2782
            if (next_block->offset >= end) {
2783
                next = MIN(next, next_block->offset);
2784
            }
2785
        }
2786
        if (next - end >= size && next - end < mingap) {
2787
            offset =  end;
2788
            mingap = next - end;
2789
        }
2790
    }
2791
    return offset;
2792
}
2793
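
/* Descriptive note: this is a best-fit search - for each existing block it
 * computes the gap to the nearest following block and keeps the smallest
 * gap that still fits 'size', so reallocation after qemu_ram_free() tends
 * to reuse holes instead of growing last_ram_offset(). */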

    
2794
static ram_addr_t last_ram_offset(void)
2795
{
2796
    RAMBlock *block;
2797
    ram_addr_t last = 0;
2798

    
2799
    QLIST_FOREACH(block, &ram_list.blocks, next)
2800
        last = MAX(last, block->offset + block->length);
2801

    
2802
    return last;
2803
}
2804

    
2805
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
2806
                                   ram_addr_t size, void *host)
2807
{
2808
    RAMBlock *new_block, *block;
2809

    
2810
    size = TARGET_PAGE_ALIGN(size);
2811
    new_block = qemu_mallocz(sizeof(*new_block));
2812

    
2813
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2814
        char *id = dev->parent_bus->info->get_dev_path(dev);
2815
        if (id) {
2816
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2817
            qemu_free(id);
2818
        }
2819
    }
2820
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2821

    
2822
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2823
        if (!strcmp(block->idstr, new_block->idstr)) {
2824
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2825
                    new_block->idstr);
2826
            abort();
2827
        }
2828
    }
2829

    
2830
    if (host) {
2831
        new_block->host = host;
2832
    } else {
2833
        if (mem_path) {
2834
#if defined (__linux__) && !defined(TARGET_S390X)
2835
            new_block->host = file_ram_alloc(new_block, size, mem_path);
2836
            if (!new_block->host) {
2837
                new_block->host = qemu_vmalloc(size);
2838
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2839
            }
2840
#else
2841
            fprintf(stderr, "-mem-path option unsupported\n");
2842
            exit(1);
2843
#endif
2844
        } else {
2845
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2846
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2847
            new_block->host = mmap((void*)0x1000000, size,
2848
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
2849
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2850
#else
2851
            new_block->host = qemu_vmalloc(size);
2852
#endif
2853
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2854
        }
2855
    }
2856

    
2857
    new_block->offset = find_ram_offset(size);
2858
    new_block->length = size;
2859

    
2860
    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2861

    
2862
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
2863
                                       last_ram_offset() >> TARGET_PAGE_BITS);
2864
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2865
           0xff, size >> TARGET_PAGE_BITS);
2866

    
2867
    if (kvm_enabled())
2868
        kvm_setup_guest_memory(new_block->host, size);
2869

    
2870
    return new_block->offset;
2871
}
2872

    
2873
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
2874
{
2875
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
2876
}
2877

    
2878
void qemu_ram_free(ram_addr_t addr)
2879
{
2880
    RAMBlock *block;
2881

    
2882
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2883
        if (addr == block->offset) {
2884
            QLIST_REMOVE(block, next);
2885
            if (mem_path) {
2886
#if defined (__linux__) && !defined(TARGET_S390X)
2887
                if (block->fd) {
2888
                    munmap(block->host, block->length);
2889
                    close(block->fd);
2890
                } else {
2891
                    qemu_vfree(block->host);
2892
                }
2893
#endif
2894
            } else {
2895
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2896
                munmap(block->host, block->length);
2897
#else
2898
                qemu_vfree(block->host);
2899
#endif
2900
            }
2901
            qemu_free(block);
2902
            return;
2903
        }
2904
    }
2905

    
2906
}
2907

    
2908
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
2916
void *qemu_get_ram_ptr(ram_addr_t addr)
2917
{
2918
    RAMBlock *block;
2919

    
2920
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2921
        if (addr - block->offset < block->length) {
2922
            QLIST_REMOVE(block, next);
2923
            QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2924
            return block->host + (addr - block->offset);
2925
        }
2926
    }
2927

    
2928
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2929
    abort();
2930

    
2931
    return NULL;
2932
}
2933

    
2934
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2935
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2936
 */
2937
void *qemu_safe_ram_ptr(ram_addr_t addr)
2938
{
2939
    RAMBlock *block;
2940

    
2941
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2942
        if (addr - block->offset < block->length) {
2943
            return block->host + (addr - block->offset);
2944
        }
2945
    }
2946

    
2947
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2948
    abort();
2949

    
2950
    return NULL;
2951
}
2952

    
2953
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
2954
{
2955
    RAMBlock *block;
2956
    uint8_t *host = ptr;
2957

    
2958
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2959
        if (host - block->host < block->length) {
2960
            *ram_addr = block->offset + (host - block->host);
2961
            return 0;
2962
        }
2963
    }
2964
    return -1;
2965
}
2966
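
/* Illustrative round trip (not in the original source):
 *
 *     void *p = qemu_get_ram_ptr(off);
 *     ram_addr_t back;
 *     if (qemu_ram_addr_from_host(p, &back) == 0) {
 *         assert(back == off);
 *     }
 *
 * assuming 'off' was returned by qemu_ram_alloc() earlier.
 */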

    
2967
/* Some of the softmmu routines need to translate from a host pointer
2968
   (typically a TLB entry) back to a ram offset.  */
2969
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2970
{
2971
    ram_addr_t ram_addr;
2972

    
2973
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2974
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2975
        abort();
2976
    }
2977
    return ram_addr;
2978
}
2979

    
2980
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2981
{
2982
#ifdef DEBUG_UNASSIGNED
2983
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2984
#endif
2985
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2986
    do_unassigned_access(addr, 0, 0, 0, 1);
2987
#endif
2988
    return 0;
2989
}
2990

    
2991
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2992
{
2993
#ifdef DEBUG_UNASSIGNED
2994
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2995
#endif
2996
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2997
    do_unassigned_access(addr, 0, 0, 0, 2);
2998
#endif
2999
    return 0;
3000
}
3001

    
3002
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
3003
{
3004
#ifdef DEBUG_UNASSIGNED
3005
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3006
#endif
3007
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3008
    do_unassigned_access(addr, 0, 0, 0, 4);
3009
#endif
3010
    return 0;
3011
}
3012

    
3013
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
3014
{
3015
#ifdef DEBUG_UNASSIGNED
3016
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3017
#endif
3018
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3019
    do_unassigned_access(addr, 1, 0, 0, 1);
3020
#endif
3021
}
3022

    
3023
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
3024
{
3025
#ifdef DEBUG_UNASSIGNED
3026
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3027
#endif
3028
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3029
    do_unassigned_access(addr, 1, 0, 0, 2);
3030
#endif
3031
}
3032

    
3033
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
3034
{
3035
#ifdef DEBUG_UNASSIGNED
3036
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
3037
#endif
3038
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3039
    do_unassigned_access(addr, 1, 0, 0, 4);
3040
#endif
3041
}
3042

    
3043
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
3044
    unassigned_mem_readb,
3045
    unassigned_mem_readw,
3046
    unassigned_mem_readl,
3047
};
3048

    
3049
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
3050
    unassigned_mem_writeb,
3051
    unassigned_mem_writew,
3052
    unassigned_mem_writel,
3053
};
3054

    
3055
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
3056
                                uint32_t val)
3057
{
3058
    int dirty_flags;
3059
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3060
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3061
#if !defined(CONFIG_USER_ONLY)
3062
        tb_invalidate_phys_page_fast(ram_addr, 1);
3063
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3064
#endif
3065
    }
3066
    stb_p(qemu_get_ram_ptr(ram_addr), val);
3067
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3068
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3069
    /* we remove the notdirty callback only if the code has been
3070
       flushed */
3071
    if (dirty_flags == 0xff)
3072
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3073
}
3074

    
3075
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
3076
                                uint32_t val)
3077
{
3078
    int dirty_flags;
3079
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3080
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3081
#if !defined(CONFIG_USER_ONLY)
3082
        tb_invalidate_phys_page_fast(ram_addr, 2);
3083
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3084
#endif
3085
    }
3086
    stw_p(qemu_get_ram_ptr(ram_addr), val);
3087
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3088
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3089
    /* we remove the notdirty callback only if the code has been
3090
       flushed */
3091
    if (dirty_flags == 0xff)
3092
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3093
}
3094

    
3095
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
3096
                                uint32_t val)
3097
{
3098
    int dirty_flags;
3099
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3100
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3101
#if !defined(CONFIG_USER_ONLY)
3102
        tb_invalidate_phys_page_fast(ram_addr, 4);
3103
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3104
#endif
3105
    }
3106
    stl_p(qemu_get_ram_ptr(ram_addr), val);
3107
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3108
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3109
    /* we remove the notdirty callback only if the code has been
3110
       flushed */
3111
    if (dirty_flags == 0xff)
3112
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3113
}
3114

    
3115
static CPUReadMemoryFunc * const error_mem_read[3] = {
3116
    NULL, /* never used */
3117
    NULL, /* never used */
3118
    NULL, /* never used */
3119
};
3120

    
3121
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
3122
    notdirty_mem_writeb,
3123
    notdirty_mem_writew,
3124
    notdirty_mem_writel,
3125
};
3126
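
/* Descriptive note: these handlers implement the TLB_NOTDIRTY slow path.
 * A write to a clean RAM page first invalidates any translated code on it
 * (tb_invalidate_phys_page_fast), performs the store, sets the dirty
 * flags, and finally disarms the slow path with tlb_set_dirty() once the
 * page's flags are fully dirty (no translated code left to protect). */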

    
3127
/* Generate a debug exception if a watchpoint has been hit.  */
3128
static void check_watchpoint(int offset, int len_mask, int flags)
3129
{
3130
    CPUState *env = cpu_single_env;
3131
    target_ulong pc, cs_base;
3132
    TranslationBlock *tb;
3133
    target_ulong vaddr;
3134
    CPUWatchpoint *wp;
3135
    int cpu_flags;
3136

    
3137
    if (env->watchpoint_hit) {
3138
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
3141
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3142
        return;
3143
    }
3144
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3145
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3146
        if ((vaddr == (wp->vaddr & len_mask) ||
3147
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3148
            wp->flags |= BP_WATCHPOINT_HIT;
3149
            if (!env->watchpoint_hit) {
3150
                env->watchpoint_hit = wp;
3151
                tb = tb_find_pc(env->mem_io_pc);
3152
                if (!tb) {
3153
                    cpu_abort(env, "check_watchpoint: could not find TB for "
3154
                              "pc=%p", (void *)env->mem_io_pc);
3155
                }
3156
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
3157
                tb_phys_invalidate(tb, -1);
3158
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3159
                    env->exception_index = EXCP_DEBUG;
3160
                } else {
3161
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3162
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3163
                }
3164
                cpu_resume_from_signal(env, NULL);
3165
            }
3166
        } else {
3167
            wp->flags &= ~BP_WATCHPOINT_HIT;
3168
        }
3169
    }
3170
}
3171

    
3172
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
3173
   so these check for a hit then pass through to the normal out-of-line
3174
   phys routines.  */
3175
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
3176
{
3177
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
3178
    return ldub_phys(addr);
3179
}
3180

    
3181
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
3182
{
3183
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
3184
    return lduw_phys(addr);
3185
}
3186

    
3187
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
3188
{
3189
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
3190
    return ldl_phys(addr);
3191
}
3192

    
3193
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
3194
                             uint32_t val)
3195
{
3196
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
3197
    stb_phys(addr, val);
3198
}
3199

    
3200
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
3201
                             uint32_t val)
3202
{
3203
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
3204
    stw_phys(addr, val);
3205
}
3206

    
3207
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
3208
                             uint32_t val)
3209
{
3210
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
3211
    stl_phys(addr, val);
3212
}
3213

    
3214
static CPUReadMemoryFunc * const watch_mem_read[3] = {
3215
    watch_mem_readb,
3216
    watch_mem_readw,
3217
    watch_mem_readl,
3218
};
3219

    
3220
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
3221
    watch_mem_writeb,
3222
    watch_mem_writew,
3223
    watch_mem_writel,
3224
};
3225

    
3226
static inline uint32_t subpage_readlen (subpage_t *mmio,
3227
                                        target_phys_addr_t addr,
3228
                                        unsigned int len)
3229
{
3230
    unsigned int idx = SUBPAGE_IDX(addr);
3231
#if defined(DEBUG_SUBPAGE)
3232
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3233
           mmio, len, addr, idx);
3234
#endif
3235

    
3236
    addr += mmio->region_offset[idx];
3237
    idx = mmio->sub_io_index[idx];
3238
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3239
}
3240

    
3241
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3242
                                     uint32_t value, unsigned int len)
3243
{
3244
    unsigned int idx = SUBPAGE_IDX(addr);
3245
#if defined(DEBUG_SUBPAGE)
3246
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3247
           __func__, mmio, len, addr, idx, value);
3248
#endif
3249

    
3250
    addr += mmio->region_offset[idx];
3251
    idx = mmio->sub_io_index[idx];
3252
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3253
}
3254

    
3255
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3256
{
3257
    return subpage_readlen(opaque, addr, 0);
3258
}
3259

    
3260
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3261
                            uint32_t value)
3262
{
3263
    subpage_writelen(opaque, addr, value, 0);
3264
}
3265

    
3266
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3267
{
3268
    return subpage_readlen(opaque, addr, 1);
3269
}
3270

    
3271
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3272
                            uint32_t value)
3273
{
3274
    subpage_writelen(opaque, addr, value, 1);
3275
}
3276

    
3277
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3278
{
3279
    return subpage_readlen(opaque, addr, 2);
3280
}
3281

    
3282
static void subpage_writel (void *opaque, target_phys_addr_t addr,
3283
                            uint32_t value)
3284
{
3285
    subpage_writelen(opaque, addr, value, 2);
3286
}
3287

    
3288
static CPUReadMemoryFunc * const subpage_read[] = {
3289
    &subpage_readb,
3290
    &subpage_readw,
3291
    &subpage_readl,
3292
};
3293

    
3294
static CPUWriteMemoryFunc * const subpage_write[] = {
3295
    &subpage_writeb,
3296
    &subpage_writew,
3297
    &subpage_writel,
3298
};
3299

    
3300
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3301
                             ram_addr_t memory, ram_addr_t region_offset)
3302
{
3303
    int idx, eidx;
3304

    
3305
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3306
        return -1;
3307
    idx = SUBPAGE_IDX(start);
3308
    eidx = SUBPAGE_IDX(end);
3309
#if defined(DEBUG_SUBPAGE)
3310
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3311
           mmio, start, end, idx, eidx, memory);
3312
#endif
3313
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3314
        memory = IO_MEM_UNASSIGNED;
3315
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3316
    for (; idx <= eidx; idx++) {
3317
        mmio->sub_io_index[idx] = memory;
3318
        mmio->region_offset[idx] = region_offset;
3319
    }
3320

    
3321
    return 0;
3322
}
3323

    
3324
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3325
                                ram_addr_t orig_memory,
3326
                                ram_addr_t region_offset)
3327
{
3328
    subpage_t *mmio;
3329
    int subpage_memory;
3330

    
3331
    mmio = qemu_mallocz(sizeof(subpage_t));
3332

    
3333
    mmio->base = base;
3334
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3335
                                            DEVICE_NATIVE_ENDIAN);
3336
#if defined(DEBUG_SUBPAGE)
3337
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3338
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3339
#endif
3340
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3341
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3342

    
3343
    return mmio;
3344
}
3345

    
3346
static int get_free_io_mem_idx(void)
3347
{
3348
    int i;
3349

    
3350
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3351
        if (!io_mem_used[i]) {
3352
            io_mem_used[i] = 1;
3353
            return i;
3354
        }
3355
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
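
/* Hedged usage sketch (not part of QEMU proper): how a device model might
 * use the registration API documented above.  The example_* names and the
 * opaque device_state pointer are made up for illustration; a real device
 * would decode addr against its register map and hand the returned token
 * to cpu_register_physical_memory(). */
static uint32_t example_mmio_read(void *opaque, target_phys_addr_t addr)
{
    /* illustrative only: every register reads as zero */
    return 0;
}

static void example_mmio_write(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* illustrative only: writes are ignored */
}

/* the same handler can serve byte (index 0), word (1) and dword (2)
   accesses; NULL entries would fall back to the unassigned handlers */
static CPUReadMemoryFunc * const example_mmio_read_fn[3] = {
    example_mmio_read,
    example_mmio_read,
    example_mmio_read,
};

static CPUWriteMemoryFunc * const example_mmio_write_fn[3] = {
    example_mmio_write,
    example_mmio_write,
    example_mmio_write,
};

static int example_mmio_init(void *device_state)
{
    /* DEVICE_LITTLE_ENDIAN requests byte swapping on big endian targets,
       per the endianness table earlier in this file */
    return cpu_register_io_memory(example_mmio_read_fn,
                                  example_mmio_write_fn,
                                  device_state, DEVICE_LITTLE_ENDIAN);
}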
3518

    
3519
void cpu_unregister_io_memory(int io_table_address)
3520
{
3521
    int i;
3522
    int io_index = io_table_address >> IO_MEM_SHIFT;
3523

    
3524
    swapendian_del(io_index);
3525

    
3526
    for (i=0;i < 3; i++) {
3527
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3528
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3529
    }
3530
    io_mem_opaque[io_index] = NULL;
3531
    io_mem_used[io_index] = 0;
3532
}
3533

    
3534
static void io_mem_init(void)
3535
{
3536
    int i;
3537

    
3538
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3539
                                 unassigned_mem_write, NULL,
3540
                                 DEVICE_NATIVE_ENDIAN);
3541
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3542
                                 unassigned_mem_write, NULL,
3543
                                 DEVICE_NATIVE_ENDIAN);
3544
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3545
                                 notdirty_mem_write, NULL,
3546
                                 DEVICE_NATIVE_ENDIAN);
3547
    for (i=0; i<5; i++)
3548
        io_mem_used[i] = 1;
3549

    
3550
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3551
                                          watch_mem_write, NULL,
3552
                                          DEVICE_NATIVE_ENDIAN);
3553
}
3554

    
3555
#endif /* !defined(CONFIG_USER_ONLY) */
3556

    
3557
/* physical memory access (slow version, mainly for debug) */
3558
#if defined(CONFIG_USER_ONLY)
3559
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3560
                        uint8_t *buf, int len, int is_write)
3561
{
3562
    int l, flags;
3563
    target_ulong page;
3564
    void * p;
3565

    
3566
    while (len > 0) {
3567
        page = addr & TARGET_PAGE_MASK;
3568
        l = (page + TARGET_PAGE_SIZE) - addr;
3569
        if (l > len)
3570
            l = len;
3571
        flags = page_get_flags(page);
3572
        if (!(flags & PAGE_VALID))
3573
            return -1;
3574
        if (is_write) {
3575
            if (!(flags & PAGE_WRITE))
3576
                return -1;
3577
            /* XXX: this code should not depend on lock_user */
3578
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3579
                return -1;
3580
            memcpy(p, buf, l);
3581
            unlock_user(p, addr, l);
3582
        } else {
3583
            if (!(flags & PAGE_READ))
3584
                return -1;
3585
            /* XXX: this code should not depend on lock_user */
3586
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3587
                return -1;
3588
            memcpy(buf, p, l);
3589
            unlock_user(p, addr, 0);
3590
        }
3591
        len -= l;
3592
        buf += l;
3593
        addr += l;
3594
    }
3595
    return 0;
3596
}
3597

    
3598
#else
3599
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3600
                            int len, int is_write)
3601
{
3602
    int l, io_index;
3603
    uint8_t *ptr;
3604
    uint32_t val;
3605
    target_phys_addr_t page;
3606
    unsigned long pd;
3607
    PhysPageDesc *p;
3608

    
3609
    while (len > 0) {
3610
        page = addr & TARGET_PAGE_MASK;
3611
        l = (page + TARGET_PAGE_SIZE) - addr;
3612
        if (l > len)
3613
            l = len;
3614
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3615
        if (!p) {
3616
            pd = IO_MEM_UNASSIGNED;
3617
        } else {
3618
            pd = p->phys_offset;
3619
        }
3620

    
3621
        if (is_write) {
3622
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3623
                target_phys_addr_t addr1 = addr;
3624
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3625
                if (p)
3626
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3627
                /* XXX: could force cpu_single_env to NULL to avoid
3628
                   potential bugs */
3629
                if (l >= 4 && ((addr1 & 3) == 0)) {
3630
                    /* 32 bit write access */
3631
                    val = ldl_p(buf);
3632
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3633
                    l = 4;
3634
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3635
                    /* 16 bit write access */
3636
                    val = lduw_p(buf);
3637
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3638
                    l = 2;
3639
                } else {
3640
                    /* 8 bit write access */
3641
                    val = ldub_p(buf);
3642
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3643
                    l = 1;
3644
                }
3645
            } else {
3646
                unsigned long addr1;
3647
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3648
                /* RAM case */
3649
                ptr = qemu_get_ram_ptr(addr1);
3650
                memcpy(ptr, buf, l);
3651
                if (!cpu_physical_memory_is_dirty(addr1)) {
3652
                    /* invalidate code */
3653
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3654
                    /* set dirty bit */
3655
                    cpu_physical_memory_set_dirty_flags(
3656
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
3657
                }
3658
            }
3659
        } else {
3660
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3661
                !(pd & IO_MEM_ROMD)) {
3662
                target_phys_addr_t addr1 = addr;
3663
                /* I/O case */
3664
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3665
                if (p)
3666
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3667
                if (l >= 4 && ((addr1 & 3) == 0)) {
3668
                    /* 32 bit read access */
3669
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3670
                    stl_p(buf, val);
3671
                    l = 4;
3672
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3673
                    /* 16 bit read access */
3674
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3675
                    stw_p(buf, val);
3676
                    l = 2;
3677
                } else {
3678
                    /* 8 bit read access */
3679
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3680
                    stb_p(buf, val);
3681
                    l = 1;
3682
                }
3683
            } else {
3684
                /* RAM case */
3685
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3686
                    (addr & ~TARGET_PAGE_MASK);
3687
                memcpy(buf, ptr, l);
3688
            }
3689
        }
3690
        len -= l;
3691
        buf += l;
3692
        addr += l;
3693
    }
3694
}

/* used for ROM loading: can write to RAM and ROM */
3697
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3698
                                   const uint8_t *buf, int len)
3699
{
3700
    int l;
3701
    uint8_t *ptr;
3702
    target_phys_addr_t page;
3703
    unsigned long pd;
3704
    PhysPageDesc *p;
3705

    
3706
    while (len > 0) {
3707
        page = addr & TARGET_PAGE_MASK;
3708
        l = (page + TARGET_PAGE_SIZE) - addr;
3709
        if (l > len)
3710
            l = len;
3711
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3712
        if (!p) {
3713
            pd = IO_MEM_UNASSIGNED;
3714
        } else {
3715
            pd = p->phys_offset;
3716
        }
3717

    
3718
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3719
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3720
            !(pd & IO_MEM_ROMD)) {
3721
            /* do nothing */
3722
        } else {
3723
            unsigned long addr1;
3724
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3725
            /* ROM/RAM case */
3726
            ptr = qemu_get_ram_ptr(addr1);
3727
            memcpy(ptr, buf, l);
3728
        }
3729
        len -= l;
3730
        buf += l;
3731
        addr += l;
3732
    }
3733
}
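
/* Hedged sketch (illustrative only, not part of QEMU): a board model
 * loading a firmware blob into a ROM region would go through the helper
 * above, since a write to ROM via cpu_physical_memory_rw() is discarded.
 * The address and names below are made up. */
static void example_load_firmware(const uint8_t *blob, int blob_len)
{
    /* 0xfff00000 stands in for a hypothetical ROM base address */
    cpu_physical_memory_write_rom(0xfff00000, blob, blob_len);
}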

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
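
/* Hedged sketch: a caller whose cpu_physical_memory_map() attempt failed
 * (e.g. because the single bounce buffer was busy) can park a retry
 * callback here; cpu_notify_map_clients() above runs and unregisters it
 * once the bounce buffer is released in cpu_physical_memory_unmap().
 * The names below are illustrative only. */
static void example_map_retry(void *opaque)
{
    /* re-issue the transfer described by 'opaque' */
}

static void *example_defer_transfer(void *transfer_state)
{
    return cpu_register_map_client(transfer_state, example_map_retry);
}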

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
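
/* Hedged usage sketch of the map/unmap pair above (illustrative only):
 * a DMA-style read of guest memory into a host buffer.  If the region
 * cannot be mapped directly, *plen may come back short or the call may
 * return NULL; real callers loop or retry via cpu_register_map_client(),
 * here we simply fall back to the slow copying path. */
static void example_dma_read(target_phys_addr_t guest_addr,
                             uint8_t *dest, int size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 0);

    if (host && plen == (target_phys_addr_t)size) {
        memcpy(dest, host, size);
        /* is_write == 0, so unmap only releases the mapping */
        cpu_physical_memory_unmap(host, plen, 0, size);
    } else {
        if (host) {
            cpu_physical_memory_unmap(host, plen, 0, 0);
        }
        cpu_physical_memory_rw(guest_addr, dest, size, 0);
    }
}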

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
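
/* Hedged illustration (not used by QEMU itself) of the aligned helpers
 * above: device or board code typically walks guest-physical data
 * structures with them.  EXAMPLE_DESC_ADDR and the layout are made up:
 * a descriptor holding a 32-bit buffer address followed by a 16-bit
 * length, both naturally aligned as the warnings above require. */
#define EXAMPLE_DESC_ADDR 0x10000000
static void example_read_descriptor(uint32_t *buf_addr, uint32_t *buf_len)
{
    *buf_addr = ldl_phys(EXAMPLE_DESC_ADDR);      /* aligned 32-bit load  */
    *buf_len  = lduw_phys(EXAMPLE_DESC_ADDR + 4); /* aligned 16-bit load  */
    stl_phys(EXAMPLE_DESC_ADDR, 0);               /* aligned 32-bit store */
}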

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
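
/* Hedged sketch: cpu_memory_rw_debug() is the hook debugger front ends
 * (e.g. the gdb stub) go through; a hypothetical helper reading one
 * target_ulong from a guest virtual address of the given CPU. */
static int example_debug_read_long(CPUState *env, target_ulong vaddr,
                                   target_ulong *out)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)out,
                               sizeof(*out), 0);
}
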
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif