/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "exec-all.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "qemu-timer.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self-modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
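
/* Worked example: in user mode with a 47-bit guest virtual address space
   and 4K pages (TARGET_PAGE_BITS = 12), 47 - 12 = 35 bits remain above the
   page offset.  35 % 10 = 5, so V_L1_BITS = 5 (>= 4) and V_L1_SHIFT = 30,
   i.e. a 32-entry L1 table sitting above three full 10-bit levels. */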

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);

/* io memory support */
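/* the second index is log2 of the access size in bytes:
   0 = byte, 1 = 16-bit, 2 = 32-bit accesses */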
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
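
/* Look up the PageDesc for a target page index by walking the
   V_L1_SHIFT / L2_BITS radix levels below l1_map; if 'alloc' is set,
   missing interior and leaf tables are allocated (zeroed) on the way. */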

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
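
/* Same walk as page_find_alloc, but over l1_phys_map; freshly allocated
   leaf tables start out as IO_MEM_UNASSIGNED with an identity
   region_offset. */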

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
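    /* Leave headroom so a TB that is being translated can never overrun
       the buffer: tb_alloc stops handing out TBs once code_gen_ptr passes
       code_gen_buffer_max_size, i.e. roughly one worst-case TB before
       the end. */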
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_MAX_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

    
801
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
802
{
803
    TranslationBlock *tb1;
804
    unsigned int n1;
805

    
806
    for(;;) {
807
        tb1 = *ptb;
808
        n1 = (long)tb1 & 3;
809
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
810
        if (tb1 == tb) {
811
            *ptb = tb1->page_next[n1];
812
            break;
813
        }
814
        ptb = &tb1->page_next[n1];
815
    }
816
}
817

    
818
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
819
{
820
    TranslationBlock *tb1, **ptb;
821
    unsigned int n1;
822

    
823
    ptb = &tb->jmp_next[n];
824
    tb1 = *ptb;
825
    if (tb1) {
826
        /* find tb(n) in circular list */
827
        for(;;) {
828
            tb1 = *ptb;
829
            n1 = (long)tb1 & 3;
830
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
831
            if (n1 == n && tb1 == tb)
832
                break;
833
            if (n1 == 2) {
834
                ptb = &tb1->jmp_first;
835
            } else {
836
                ptb = &tb1->jmp_next[n1];
837
            }
838
        }
839
        /* now we can suppress tb(n) from the list */
840
        *ptb = tb->jmp_next[n];
841

    
842
        tb->jmp_next[n] = NULL;
843
    }
844
}
845

    
846
/* reset the jump entry 'n' of a TB so that it is not chained to
847
   another TB */
848
static inline void tb_reset_jump(TranslationBlock *tb, int n)
849
{
850
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
851
}
852

    
853
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
854
{
855
    CPUState *env;
856
    PageDesc *p;
857
    unsigned int h, n1;
858
    tb_page_addr_t phys_pc;
859
    TranslationBlock *tb1, *tb2;
860

    
861
    /* remove the TB from the hash list */
862
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
863
    h = tb_phys_hash_func(phys_pc);
864
    tb_remove(&tb_phys_hash[h], tb,
865
              offsetof(TranslationBlock, phys_hash_next));
866

    
867
    /* remove the TB from the page list */
868
    if (tb->page_addr[0] != page_addr) {
869
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
870
        tb_page_remove(&p->first_tb, tb);
871
        invalidate_page_bitmap(p);
872
    }
873
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
874
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
875
        tb_page_remove(&p->first_tb, tb);
876
        invalidate_page_bitmap(p);
877
    }
878

    
879
    tb_invalidated_flag = 1;
880

    
881
    /* remove the TB from the hash list */
882
    h = tb_jmp_cache_hash_func(tb->pc);
883
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
884
        if (env->tb_jmp_cache[h] == tb)
885
            env->tb_jmp_cache[h] = NULL;
886
    }
887

    
888
    /* suppress this TB from the two jump lists */
889
    tb_jmp_remove(tb, 0);
890
    tb_jmp_remove(tb, 1);
891

    
892
    /* suppress any remaining jumps to this TB */
893
    tb1 = tb->jmp_first;
894
    for(;;) {
895
        n1 = (long)tb1 & 3;
896
        if (n1 == 2)
897
            break;
898
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
899
        tb2 = tb1->jmp_next[n1];
900
        tb_reset_jump(tb1, n1);
901
        tb1->jmp_next[n1] = NULL;
902
        tb1 = tb2;
903
    }
904
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
905

    
906
    tb_phys_invalidate_count++;
907
}
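
/* Set bits [start, start+len) in the byte-array bitmap 'tab'; e.g.
   set_bits(tab, 3, 7) sets bits 3..9, crossing the first byte boundary. */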

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
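
/* Generate a new TB for (pc, cs_base, flags).  If the TB pool or the
   code buffer is exhausted, everything is flushed and the allocation
   retried; tb_invalidated_flag then tells the caller that older TBs
   are gone. */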

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb to the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
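/* tbs[] entries are laid out in increasing tc_ptr order, since
   code_gen_ptr only grows between flushes; that is what makes the binary
   search valid, and on a non-exact hit the TB containing tc_ptr is
   tbs[m_max]. */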
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
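
/* With icount, forcing icount_decr.u16.high to 0xffff makes the pending
   instruction-count decrement go negative, so the CPU drops out of the
   translated code at the next check without any TB unlinking. */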
1629

    
1630
/* mask must never be zero, except for A20 change call */
1631
void cpu_interrupt(CPUState *env, int mask)
1632
{
1633
    int old_mask;
1634

    
1635
    old_mask = env->interrupt_request;
1636
    env->interrupt_request |= mask;
1637

    
1638
#ifndef CONFIG_USER_ONLY
1639
    /*
1640
     * If called from iothread context, wake the target cpu in
1641
     * case its halted.
1642
     */
1643
    if (!qemu_cpu_self(env)) {
1644
        qemu_cpu_kick(env);
1645
        return;
1646
    }
1647
#endif
1648

    
1649
    if (use_icount) {
1650
        env->icount_decr.u16.high = 0xffff;
1651
#ifndef CONFIG_USER_ONLY
1652
        if (!can_do_io(env)
1653
            && (mask & ~old_mask) != 0) {
1654
            cpu_abort(env, "Raised interrupt while not in I/O function");
1655
        }
1656
#endif
1657
    } else {
1658
        cpu_unlink_tb(env);
1659
    }
1660
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

static void phys_page_for_each_1(CPUPhysMemoryClient *client,
                                 int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                client->set_memory(client, pd[i].region_offset,
                                   TARGET_PAGE_SIZE, pd[i].phys_offset);
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i);
    }
}

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
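
/* Example (illustrative sketch with hypothetical names): a consumer such
 * as the KVM memory layer embeds a CPUPhysMemoryClient and registers it.
 * Registration immediately replays every existing mapping through the
 * client's set_memory() callback via phys_page_for_each().
 *
 *     static CPUPhysMemoryClient my_client = {
 *         .set_memory        = my_set_memory,
 *         .sync_dirty_bitmap = my_sync_dirty_bitmap,
 *         .migration_log     = my_migration_log,
 *     };
 *     cpu_register_phys_memory_client(&my_client);
 */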

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
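
/* Example (illustrative): cpu_str_to_log_mask("in_asm,cpu") returns
 * CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" returns the union of every
 * entry in cpu_log_items, and "in_asm,bogus" returns 0 because "bogus"
 * matches no item.
 */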

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
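
/* Worked example (illustrative, 32-bit addresses): with 2MB large pages,
 * a first call for vaddr 0x00200000 records addr 0x00200000 and mask
 * 0xffe00000.  A second call for vaddr 0x00800000 shifts the mask left
 * until both addresses agree under it, leaving addr 0x00000000 and mask
 * 0xff000000, i.e. a single region covering both pages.
 */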

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
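
/* Note on the two addend fields set above (illustrative): for a RAM page
 * the fast path turns a guest access into a host access as
 *
 *     host_ptr = (void *)(guest_vaddr + te->addend);
 *
 * since te->addend was stored as (host_page - vaddr).  Similarly,
 * env->iotlb[mmu_idx][index] + vaddr recovers the I/O descriptor that
 * the slow path hands to the io_mem_* callbacks.
 */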

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
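
/* dump_region() and page_dump() below are a complete usage example of
 * this walker: runs of pages with identical flags are merged and the
 * callback is invoked once per merged region.
 */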

static int dump_region(void *priv, abi_ulong start,
    abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held.  */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
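
/* Example (illustrative sketch): the user-mode mmap emulation typically
 * publishes a fresh writable mapping with something like
 *
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 *
 * so that page_check_range() below can later validate guest accesses
 * against these flags.
 */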

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
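
/* Worked example (illustrative, 4KB target pages): registering a region
 * with start_addr 0x1080 and orig_size 0x100, the page at addr 0x1000
 * yields start_addr2 = 0x080 and end_addr2 = 0x17f, so need_subpage is
 * set and only bytes 0x080..0x17f of that page are routed to the new
 * handler.
 */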

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    cpu_notify_set_memory(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
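
/* Example (illustrative sketch, made-up names): a board model maps its
 * RAM through the cpu_register_physical_memory() wrapper, e.g.
 *
 *     ram_addr_t ram = qemu_ram_alloc(NULL, "board.ram", ram_size);
 *     cpu_register_physical_memory(0, ram_size, ram | IO_MEM_RAM);
 *
 * "board.ram" is a placeholder; real callers pass their device and a
 * stable name so the RAM block can be identified during migration.
 */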

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = 0, mingap = ULONG_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = ULONG_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }
    return offset;
}
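
/* Worked example (illustrative): with blocks at [0, 0x100000) and
 * [0x200000, 0x300000), a request for 0x80000 bytes sees a 0x100000-wide
 * gap after the first block and an unbounded gap after the second; the
 * smaller gap still fits, so the new block is placed at 0x100000.
 */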

static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    if (host) {
        new_block->host = host;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
            new_block->host = mmap((void*)0x1000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
            new_block->host = qemu_vmalloc(size);
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }

    new_block->offset = find_ram_offset(size);
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}
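
/* Example (illustrative): a VGA device allocating its framebuffer would
 * call
 *
 *     vram_offset = qemu_ram_alloc(&dev->qdev, "vga.vram", vga_ram_size);
 *
 * which, when the parent bus provides a device path, produces an idstr
 * such as "0000:00:02.0/vga.vram".  Duplicate idstrs abort above, so
 * names must be unique per device.
 */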

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                qemu_vfree(block->host);
#endif
            }
            qemu_free(block);
            return;
        }
    }
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
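
/* Design note (an assumption about intent, not stated in the code):
 * qemu_get_ram_ptr() moves the matched block to the list head as a
 * simple most-recently-used cache, so repeated lookups into the same
 * block hit on the first iteration.  qemu_safe_ram_ptr() skips the
 * reordering so callers that must not perturb the block order, such as
 * cpu_physical_memory_reset_dirty() above, can use it safely.
 */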

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }
    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

    
3267
static inline uint32_t subpage_readlen (subpage_t *mmio,
3268
                                        target_phys_addr_t addr,
3269
                                        unsigned int len)
3270
{
3271
    unsigned int idx = SUBPAGE_IDX(addr);
3272
#if defined(DEBUG_SUBPAGE)
3273
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3274
           mmio, len, addr, idx);
3275
#endif
3276

    
3277
    addr += mmio->region_offset[idx];
3278
    idx = mmio->sub_io_index[idx];
3279
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
3280
}
3281

    
3282
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
3283
                                     uint32_t value, unsigned int len)
3284
{
3285
    unsigned int idx = SUBPAGE_IDX(addr);
3286
#if defined(DEBUG_SUBPAGE)
3287
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
3288
           __func__, mmio, len, addr, idx, value);
3289
#endif
3290

    
3291
    addr += mmio->region_offset[idx];
3292
    idx = mmio->sub_io_index[idx];
3293
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
3294
}
3295

    
3296
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
3297
{
3298
    return subpage_readlen(opaque, addr, 0);
3299
}
3300

    
3301
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
3302
                            uint32_t value)
3303
{
3304
    subpage_writelen(opaque, addr, value, 0);
3305
}
3306

    
3307
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
3308
{
3309
    return subpage_readlen(opaque, addr, 1);
3310
}
3311

    
3312
static void subpage_writew (void *opaque, target_phys_addr_t addr,
3313
                            uint32_t value)
3314
{
3315
    subpage_writelen(opaque, addr, value, 1);
3316
}
3317

    
3318
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
3319
{
3320
    return subpage_readlen(opaque, addr, 2);
3321
}
3322

    
3323
static void subpage_writel (void *opaque, target_phys_addr_t addr,
3324
                            uint32_t value)
3325
{
3326
    subpage_writelen(opaque, addr, value, 2);
3327
}
3328

    
3329
static CPUReadMemoryFunc * const subpage_read[] = {
3330
    &subpage_readb,
3331
    &subpage_readw,
3332
    &subpage_readl,
3333
};
3334

    
3335
static CPUWriteMemoryFunc * const subpage_write[] = {
3336
    &subpage_writeb,
3337
    &subpage_writew,
3338
    &subpage_writel,
3339
};
3340

    
3341
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
3342
                             ram_addr_t memory, ram_addr_t region_offset)
3343
{
3344
    int idx, eidx;
3345

    
3346
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3347
        return -1;
3348
    idx = SUBPAGE_IDX(start);
3349
    eidx = SUBPAGE_IDX(end);
3350
#if defined(DEBUG_SUBPAGE)
3351
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
3352
           mmio, start, end, idx, eidx, memory);
3353
#endif
3354
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
3355
        memory = IO_MEM_UNASSIGNED;
3356
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3357
    for (; idx <= eidx; idx++) {
3358
        mmio->sub_io_index[idx] = memory;
3359
        mmio->region_offset[idx] = region_offset;
3360
    }
3361

    
3362
    return 0;
3363
}
3364

    
3365
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
3366
                                ram_addr_t orig_memory,
3367
                                ram_addr_t region_offset)
3368
{
3369
    subpage_t *mmio;
3370
    int subpage_memory;
3371

    
3372
    mmio = qemu_mallocz(sizeof(subpage_t));
3373

    
3374
    mmio->base = base;
3375
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
3376
                                            DEVICE_NATIVE_ENDIAN);
3377
#if defined(DEBUG_SUBPAGE)
3378
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3379
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3380
#endif
3381
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3382
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);
3383

    
3384
    return mmio;
3385
}
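
/* Illustrative sketch (not from the original source; dev_io is a placeholder
   for a value returned by cpu_register_io_memory()): a board whose device
   occupies only the first 256 bytes of a guest page could populate the
   subpage like this:

       subpage_register(mmio, 0x000, 0x0ff, dev_io, 0);
       subpage_register(mmio, 0x100, TARGET_PAGE_SIZE - 1,
                        IO_MEM_UNASSIGNED, 0);

   Each SUBPAGE_IDX slot then routes accesses either to the device handlers
   or to the unassigned-memory handlers. */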

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */
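
/* Concretely: on a little-endian target (TARGET_WORDS_BIGENDIAN undefined),
   a DEVICE_BIG_ENDIAN device is wrapped by swapendian_init() below, so a
   guest 32-bit store of 0x12345678 reaches the device's write handler as
   bswap32(0x12345678) == 0x78563412. */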

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        qemu_free(io_mem_opaque[io_index]);
    }
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
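
/* Typical use, as an illustrative sketch (mydev_* and s are placeholder
   names, not from this file): a device model supplies per-width handlers
   and maps the returned token with cpu_register_physical_memory():

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(mydev_read, mydev_write, s,
                                       DEVICE_NATIVE_ENDIAN);
       cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);

   NULL entries in the handler arrays fall back to the unassigned_mem
   handlers. */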

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    swapendian_del(io_index);

    for (i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    for (i = 0; i < 5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
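
/* Sketch of a typical call (illustrative only; the address is arbitrary):

       uint8_t buf[4];
       cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 0);   (is_write == 0: read)
       cpu_physical_memory_rw(0x1000, buf, sizeof(buf), 1);   (is_write == 1: write)

   The transfer is split at TARGET_PAGE_SIZE boundaries; MMIO pages go
   through io_mem_read/io_mem_write at the widest naturally aligned width
   available (4, 2, then 1 bytes), RAM pages via memcpy(). */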

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
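
/* The intended map/unmap pattern, as an illustrative sketch (retry_dma is a
   placeholder name for a caller-supplied callback):

       target_phys_addr_t plen = size;
       void *host = cpu_physical_memory_map(addr, &plen, is_write);
       if (!host) {
           cpu_register_map_client(opaque, retry_dma);   (bounce busy: retry later)
       } else {
           ... operate on host[0 .. plen-1] ...
           cpu_physical_memory_unmap(host, plen, is_write, plen);
       }

   Note that plen may come back smaller than requested, so callers must loop
   until the whole range has been processed. */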

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = lduw_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stw_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
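
/* The ld*_phys/st*_phys helpers above give width-specific access on top of
   cpu_physical_memory_rw().  An illustrative sketch (pte_addr and
   PTE_ACCESSED are placeholder names) of the PTE-tracking use case mentioned
   at stl_phys_notdirty():

       uint32_t pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);

   Using the notdirty variant keeps the page's dirty bits meaningful for
   detecting guest writes rather than emulator-initiated PTE updates. */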

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif