/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
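
/* Editor's note (illustrative sizing sketch, not from the original source):
 * assuming a 64-bit L1_MAP_ADDR_SPACE_BITS and 12-bit TARGET_PAGE_BITS, the
 * macros above give V_L1_BITS_REM = (64 - 12) % 10 = 2, so V_L1_BITS = 12,
 * V_L1_SIZE = 4096 entries and V_L1_SHIFT = 40; the remaining 40 bits are
 * consumed by V_L1_SHIFT / L2_BITS = 4 lower table levels of 1024 entries
 * each.  Actual values depend on the configured target and host. */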

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
MemoryRegion *io_mem_region[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static MemoryRegion io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
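
/* Editor's note: a worked example of the rounding above (assuming a 4096-byte
 * host page): map_exec((void *)0x401234, 100) rounds start down to 0x401000
 * and end up to 0x402000, so exactly one whole page is made RWX. */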

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
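
/* Editor's note (hypothetical walk, using the 64-bit / 4 KiB layout sketched
 * near the V_L1_* macros above): page_find_alloc() slices a page index as
 *   bits [40..51] -> l1_map slot,
 *   three 10-bit groups -> intermediate pointer tables,
 *   bits [0..9]   -> slot in the leaf PageDesc[L2_SIZE] array,
 * allocating each missing level on the way down when 'alloc' is set. */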

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;
        int first_index = index & ~(L2_SIZE - 1);

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = io_mem_unassigned.ram_addr;
            pd[i].region_offset = (first_index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc phys_page_find(target_phys_addr_t index)
{
    PhysPageDesc *p = phys_page_find_alloc(index, 0);

    if (p) {
        return *p;
    } else {
        return (PhysPageDesc) {
            .phys_offset = io_mem_unassigned.ram_addr,
            .region_offset = index << TARGET_PAGE_BITS,
        };
    }
}
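
/* Editor's note: unlike page_find(), phys_page_find() never reports a miss
 * with NULL; it synthesizes a descriptor whose phys_offset points at
 * io_mem_unassigned, so callers can treat unmapped physical pages as just
 * another I/O region without a separate error path. */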

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
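
/* Editor's note (rough sizing sketch; both constants are host/target
 * dependent and the figures below are only an assumed example): with the
 * 32 MB DEFAULT_CODE_GEN_BUFFER_SIZE and a hypothetical
 * CODE_GEN_AVG_BLOCK_SIZE of 128 bytes, code_gen_max_blocks would be 262144
 * TranslationBlock slots, while code_gen_buffer_max_size keeps
 * TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes of slack so a block in mid-generation
 * cannot overrun the buffer. */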

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}
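
/* Editor's note: the expected calling pattern for tb_alloc(), as used by
 * tb_gen_code() further down:
 *
 *     tb = tb_alloc(pc);
 *     if (!tb) {
 *         tb_flush(env);      // reclaim the whole translation buffer
 *         tb = tb_alloc(pc);  // cannot fail right after a flush
 *     }
 */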

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}
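
/* Editor's note: the per-page TB lists store tagged pointers.  A link is
 * (TranslationBlock *)((long)tb | n), where n is 0 or 1 and selects which of
 * the TB's (up to two) pages the link belongs to; '& 3' recovers n and '& ~3'
 * the pointer, as tb_page_remove() above does.  In the jump lists the tag
 * value 2 marks the list head/terminator (see tb->jmp_first below). */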

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
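
/* Editor's note: set_bits() marks bits [start, start+len) LSB-first within
 * each byte.  For example, set_bits(tab, 3, 10) sets bits 3..12: the first
 * byte is OR-ed with 0xf8 (bits 3-7) and the second with 0x1f (bits 8-12),
 * matching the 'offset >> 3' / 'offset & 7' lookup used in
 * tb_invalidate_phys_page_fast() below. */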

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}
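
/* Editor's note: code_gen_ptr advances by the generated size rounded up to
 * CODE_GEN_ALIGN.  As a purely illustrative example, if CODE_GEN_ALIGN were
 * 16, a 100-byte block would advance the pointer by 112 bytes; the real
 * alignment value is defined elsewhere and is host dependent. */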

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
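
/* Editor's note: the binary search above is valid because TBs are handed out
 * from tbs[] in allocation order while code_gen_ptr only grows, so tc_ptr is
 * monotonically increasing across the array; the final 'return &tbs[m_max]'
 * therefore yields the block whose generated code contains tc_ptr. */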

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
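
/* Editor's note: a minimal usage sketch (BP_MEM_WRITE is assumed here as the
 * usual access-type flag; it is not defined in this file):
 *
 *     CPUWatchpoint *wp;
 *     if (cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp) < 0) {
 *         // len was not a power of two, or addr was not len-aligned
 *     }
 */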
1469

    
1470
/* Remove a specific watchpoint.  */
1471
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1472
                          int flags)
1473
{
1474
    target_ulong len_mask = ~(len - 1);
1475
    CPUWatchpoint *wp;
1476

    
1477
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1478
        if (addr == wp->vaddr && len_mask == wp->len_mask
1479
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1480
            cpu_watchpoint_remove_by_ref(env, wp);
1481
            return 0;
1482
        }
1483
    }
1484
    return -ENOENT;
1485
}
1486

    
1487
/* Remove a specific watchpoint by reference.  */
1488
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1489
{
1490
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1491

    
1492
    tlb_flush_page(env, watchpoint->vaddr);
1493

    
1494
    g_free(watchpoint);
1495
}
1496

    
1497
/* Remove all matching watchpoints.  */
1498
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1499
{
1500
    CPUWatchpoint *wp, *next;
1501

    
1502
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1503
        if (wp->flags & mask)
1504
            cpu_watchpoint_remove_by_ref(env, wp);
1505
    }
1506
}
1507
#endif
1508

    
1509
/* Add a breakpoint.  */
1510
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1511
                          CPUBreakpoint **breakpoint)
1512
{
1513
#if defined(TARGET_HAS_ICE)
1514
    CPUBreakpoint *bp;
1515

    
1516
    bp = g_malloc(sizeof(*bp));
1517

    
1518
    bp->pc = pc;
1519
    bp->flags = flags;
1520

    
1521
    /* keep all GDB-injected breakpoints in front */
1522
    if (flags & BP_GDB)
1523
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1524
    else
1525
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1526

    
1527
    breakpoint_invalidate(env, pc);
1528

    
1529
    if (breakpoint)
1530
        *breakpoint = bp;
1531
    return 0;
1532
#else
1533
    return -ENOSYS;
1534
#endif
1535
}
1536

    
1537
/* Remove a specific breakpoint.  */
1538
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1539
{
1540
#if defined(TARGET_HAS_ICE)
1541
    CPUBreakpoint *bp;
1542

    
1543
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1544
        if (bp->pc == pc && bp->flags == flags) {
1545
            cpu_breakpoint_remove_by_ref(env, bp);
1546
            return 0;
1547
        }
1548
    }
1549
    return -ENOENT;
1550
#else
1551
    return -ENOSYS;
1552
#endif
1553
}
1554

    
1555
/* Remove a specific breakpoint by reference.  */
1556
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1557
{
1558
#if defined(TARGET_HAS_ICE)
1559
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1560

    
1561
    breakpoint_invalidate(env, breakpoint->pc);
1562

    
1563
    g_free(breakpoint);
1564
#endif
1565
}
1566

    
1567
/* Remove all matching breakpoints. */
1568
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1569
{
1570
#if defined(TARGET_HAS_ICE)
1571
    CPUBreakpoint *bp, *next;
1572

    
1573
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1574
        if (bp->flags & mask)
1575
            cpu_breakpoint_remove_by_ref(env, bp);
1576
    }
1577
#endif
1578
}
1579

    
1580
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1581
   CPU loop after each instruction */
1582
void cpu_single_step(CPUState *env, int enabled)
1583
{
1584
#if defined(TARGET_HAS_ICE)
1585
    if (env->singlestep_enabled != enabled) {
1586
        env->singlestep_enabled = enabled;
1587
        if (kvm_enabled())
1588
            kvm_update_guest_debug(env, 0);
1589
        else {
1590
            /* must flush all the translated code to avoid inconsistencies */
1591
            /* XXX: only flush what is necessary */
1592
            tb_flush(env);
1593
        }
1594
    }
1595
#endif
1596
}
1597

    
1598
/* enable or disable low levels log */
1599
void cpu_set_log(int log_flags)
1600
{
1601
    loglevel = log_flags;
1602
    if (loglevel && !logfile) {
1603
        logfile = fopen(logfilename, log_append ? "a" : "w");
1604
        if (!logfile) {
1605
            perror(logfilename);
1606
            _exit(1);
1607
        }
1608
#if !defined(CONFIG_SOFTMMU)
1609
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1610
        {
1611
            static char logfile_buf[4096];
1612
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1613
        }
1614
#elif defined(_WIN32)
1615
        /* Win32 doesn't support line-buffering, so use unbuffered output. */
1616
        setvbuf(logfile, NULL, _IONBF, 0);
1617
#else
1618
        setvbuf(logfile, NULL, _IOLBF, 0);
1619
#endif
1620
        log_append = 1;
1621
    }
1622
    if (!loglevel && logfile) {
1623
        fclose(logfile);
1624
        logfile = NULL;
1625
    }
1626
}
1627

    
1628
void cpu_set_log_filename(const char *filename)
1629
{
1630
    logfilename = strdup(filename);
1631
    if (logfile) {
1632
        fclose(logfile);
1633
        logfile = NULL;
1634
    }
1635
    cpu_set_log(loglevel);
1636
}
1637

    
1638
static void cpu_unlink_tb(CPUState *env)
1639
{
1640
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1641
       problem and hope the cpu will stop of its own accord.  For userspace
1642
       emulation this often isn't actually as bad as it sounds.  Often
1643
       signals are used primarily to interrupt blocking syscalls.  */
1644
    TranslationBlock *tb;
1645
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1646

    
1647
    spin_lock(&interrupt_lock);
1648
    tb = env->current_tb;
1649
    /* if the cpu is currently executing code, we must unlink it and
1650
       all the potentially executing TB */
1651
    if (tb) {
1652
        env->current_tb = NULL;
1653
        tb_reset_jump_recursive(tb);
1654
    }
1655
    spin_unlock(&interrupt_lock);
1656
}
1657

    
1658
#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it is halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

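/* Editor's note (illustrative example, not part of the original code): given
 * the cpu_log_items table above, cpu_str_to_log_mask("in_asm,exec") would
 * return CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" selects every entry, and an
 * unknown name anywhere in the list makes the whole call return 0.
 */
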
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

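/* Editor's note (illustrative): two hash buckets are cleared above because a
 * translated block may cross a page boundary, so TBs hashed under the
 * preceding page can still contain code from the page being flushed.
 */
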
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

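/* Editor's note (illustrative): once tlb_reset_dirty_range() has set
 * TLB_NOTDIRTY on a write entry above, subsequent guest writes to that page
 * take the slow I/O path through notdirty_mem_write() later in this file,
 * which re-marks the page dirty and invalidates any translated code on it.
 */
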
int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB.  */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

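/* Editor's note, worked example (illustrative, assuming a 32-bit
 * target_ulong): if a 2 MB page at 0x00200000 was recorded first
 * (tlb_flush_addr = 0x00200000, tlb_flush_mask = 0xffe00000) and a second
 * 2 MB page at 0x00800000 is then added, the loop above widens the mask to
 * 0xff000000, so the tracked region becomes the 16 MB range starting at 0
 * that covers both pages.
 */
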
static bool is_ram_rom(ram_addr_t pd)
2099
{
2100
    pd &= ~TARGET_PAGE_MASK;
2101
    return pd == io_mem_ram.ram_addr || pd == io_mem_rom.ram_addr;
2102
}
2103

    
2104
static bool is_romd(ram_addr_t pd)
2105
{
2106
    MemoryRegion *mr;
2107

    
2108
    pd &= ~TARGET_PAGE_MASK;
2109
    mr = io_mem_region[pd];
2110
    return mr->rom_device && mr->readable;
2111
}
2112

    
2113
static bool is_ram_rom_romd(ram_addr_t pd)
2114
{
2115
    return is_ram_rom(pd) || is_romd(pd);
2116
}
2117

    
2118
/* Add a new TLB entry. At most one entry for a given virtual address
2119
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
2120
   supplied size is only used by tlb_flush_page.  */
2121
void tlb_set_page(CPUState *env, target_ulong vaddr,
2122
                  target_phys_addr_t paddr, int prot,
2123
                  int mmu_idx, target_ulong size)
2124
{
2125
    PhysPageDesc p;
2126
    unsigned long pd;
2127
    unsigned int index;
2128
    target_ulong address;
2129
    target_ulong code_address;
2130
    unsigned long addend;
2131
    CPUTLBEntry *te;
2132
    CPUWatchpoint *wp;
2133
    target_phys_addr_t iotlb;
2134

    
2135
    assert(size >= TARGET_PAGE_SIZE);
2136
    if (size != TARGET_PAGE_SIZE) {
2137
        tlb_add_large_page(env, vaddr, size);
2138
    }
2139
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2140
    pd = p.phys_offset;
2141
#if defined(DEBUG_TLB)
2142
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
2143
           " prot=%x idx=%d pd=0x%08lx\n",
2144
           vaddr, paddr, prot, mmu_idx, pd);
2145
#endif
2146

    
2147
    address = vaddr;
2148
    if (!is_ram_rom_romd(pd)) {
2149
        /* IO memory case (romd handled later) */
2150
        address |= TLB_MMIO;
2151
    }
2152
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2153
    if (is_ram_rom(pd)) {
2154
        /* Normal RAM.  */
2155
        iotlb = pd & TARGET_PAGE_MASK;
2156
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr)
2157
            iotlb |= io_mem_notdirty.ram_addr;
2158
        else
2159
            iotlb |= io_mem_rom.ram_addr;
2160
    } else {
2161
        /* IO handlers are currently passed a physical address.
2162
           It would be nice to pass an offset from the base address
2163
           of that region.  This would avoid having to special case RAM,
2164
           and avoid full address decoding in every device.
2165
           We can't use the high bits of pd for this because
2166
           IO_MEM_ROMD uses these as a ram address.  */
2167
        iotlb = (pd & ~TARGET_PAGE_MASK);
2168
        iotlb += p.region_offset;
2169
    }
2170

    
2171
    code_address = address;
2172
    /* Make accesses to pages with watchpoints go via the
2173
       watchpoint trap routines.  */
2174
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2175
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2176
            /* Avoid trapping reads of pages with a write breakpoint. */
2177
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
2178
                iotlb = io_mem_watch.ram_addr + paddr;
2179
                address |= TLB_MMIO;
2180
                break;
2181
            }
2182
        }
2183
    }
2184

    
2185
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2186
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2187
    te = &env->tlb_table[mmu_idx][index];
2188
    te->addend = addend - vaddr;
2189
    if (prot & PAGE_READ) {
2190
        te->addr_read = address;
2191
    } else {
2192
        te->addr_read = -1;
2193
    }
2194

    
2195
    if (prot & PAGE_EXEC) {
2196
        te->addr_code = code_address;
2197
    } else {
2198
        te->addr_code = -1;
2199
    }
2200
    if (prot & PAGE_WRITE) {
2201
        if ((pd & ~TARGET_PAGE_MASK) == io_mem_rom.ram_addr || is_romd(pd)) {
2202
            /* Write access calls the I/O callback.  */
2203
            te->addr_write = address | TLB_MMIO;
2204
        } else if ((pd & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr &&
2205
                   !cpu_physical_memory_is_dirty(pd)) {
2206
            te->addr_write = address | TLB_NOTDIRTY;
2207
        } else {
2208
            te->addr_write = address;
2209
        }
2210
    } else {
2211
        te->addr_write = -1;
2212
    }
2213
}
2214

    
2215
#else
2216

    
2217
void tlb_flush(CPUState *env, int flush_global)
2218
{
2219
}
2220

    
2221
void tlb_flush_page(CPUState *env, target_ulong addr)
2222
{
2223
}
2224

    
2225
/*
2226
 * Walks guest process memory "regions" one by one
2227
 * and calls callback function 'fn' for each region.
2228
 */
2229

    
2230
struct walk_memory_regions_data
2231
{
2232
    walk_memory_regions_fn fn;
2233
    void *priv;
2234
    unsigned long start;
2235
    int prot;
2236
};
2237

    
2238
static int walk_memory_regions_end(struct walk_memory_regions_data *data,
2239
                                   abi_ulong end, int new_prot)
2240
{
2241
    if (data->start != -1ul) {
2242
        int rc = data->fn(data->priv, data->start, end, data->prot);
2243
        if (rc != 0) {
2244
            return rc;
2245
        }
2246
    }
2247

    
2248
    data->start = (new_prot ? end : -1ul);
2249
    data->prot = new_prot;
2250

    
2251
    return 0;
2252
}
2253

    
2254
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
2255
                                 abi_ulong base, int level, void **lp)
2256
{
2257
    abi_ulong pa;
2258
    int i, rc;
2259

    
2260
    if (*lp == NULL) {
2261
        return walk_memory_regions_end(data, base, 0);
2262
    }
2263

    
2264
    if (level == 0) {
2265
        PageDesc *pd = *lp;
2266
        for (i = 0; i < L2_SIZE; ++i) {
2267
            int prot = pd[i].flags;
2268

    
2269
            pa = base | (i << TARGET_PAGE_BITS);
2270
            if (prot != data->prot) {
2271
                rc = walk_memory_regions_end(data, pa, prot);
2272
                if (rc != 0) {
2273
                    return rc;
2274
                }
2275
            }
2276
        }
2277
    } else {
2278
        void **pp = *lp;
2279
        for (i = 0; i < L2_SIZE; ++i) {
2280
            pa = base | ((abi_ulong)i <<
2281
                (TARGET_PAGE_BITS + L2_BITS * level));
2282
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
2283
            if (rc != 0) {
2284
                return rc;
2285
            }
2286
        }
2287
    }
2288

    
2289
    return 0;
2290
}
2291

    
2292
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
2293
{
2294
    struct walk_memory_regions_data data;
2295
    unsigned long i;
2296

    
2297
    data.fn = fn;
2298
    data.priv = priv;
2299
    data.start = -1ul;
2300
    data.prot = 0;
2301

    
2302
    for (i = 0; i < V_L1_SIZE; i++) {
2303
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
2304
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
2305
        if (rc != 0) {
2306
            return rc;
2307
        }
2308
    }
2309

    
2310
    return walk_memory_regions_end(&data, 0, 0);
2311
}
2312

    
2313
static int dump_region(void *priv, abi_ulong start,
2314
    abi_ulong end, unsigned long prot)
2315
{
2316
    FILE *f = (FILE *)priv;
2317

    
2318
    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2319
        " "TARGET_ABI_FMT_lx" %c%c%c\n",
2320
        start, end, end - start,
2321
        ((prot & PAGE_READ) ? 'r' : '-'),
2322
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2323
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2324

    
2325
    return (0);
2326
}
2327

    
2328
/* dump memory mappings */
2329
void page_dump(FILE *f)
2330
{
2331
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2332
            "start", "end", "size", "prot");
2333
    walk_memory_regions(f, dump_region);
2334
}
2335

    
2336
int page_get_flags(target_ulong address)
2337
{
2338
    PageDesc *p;
2339

    
2340
    p = page_find(address >> TARGET_PAGE_BITS);
2341
    if (!p)
2342
        return 0;
2343
    return p->flags;
2344
}
2345

    
2346
/* Modify the flags of a page and invalidate the code if necessary.
2347
   The flag PAGE_WRITE_ORG is positioned automatically depending
2348
   on PAGE_WRITE.  The mmap_lock should already be held.  */
2349
void page_set_flags(target_ulong start, target_ulong end, int flags)
2350
{
2351
    target_ulong addr, len;
2352

    
2353
    /* This function should never be called with addresses outside the
2354
       guest address space.  If this assert fires, it probably indicates
2355
       a missing call to h2g_valid.  */
2356
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2357
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2358
#endif
2359
    assert(start < end);
2360

    
2361
    start = start & TARGET_PAGE_MASK;
2362
    end = TARGET_PAGE_ALIGN(end);
2363

    
2364
    if (flags & PAGE_WRITE) {
2365
        flags |= PAGE_WRITE_ORG;
2366
    }
2367

    
2368
    for (addr = start, len = end - start;
2369
         len != 0;
2370
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2371
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2372

    
2373
        /* If the write protection bit is set, then we invalidate
2374
           the code inside.  */
2375
        if (!(p->flags & PAGE_WRITE) &&
2376
            (flags & PAGE_WRITE) &&
2377
            p->first_tb) {
2378
            tb_invalidate_phys_page(addr, 0, NULL);
2379
        }
2380
        p->flags = flags;
2381
    }
2382
}
2383

    
2384
int page_check_range(target_ulong start, target_ulong len, int flags)
2385
{
2386
    PageDesc *p;
2387
    target_ulong end;
2388
    target_ulong addr;
2389

    
2390
    /* This function should never be called with addresses outside the
2391
       guest address space.  If this assert fires, it probably indicates
2392
       a missing call to h2g_valid.  */
2393
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2394
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
2395
#endif
2396

    
2397
    if (len == 0) {
2398
        return 0;
2399
    }
2400
    if (start + len - 1 < start) {
2401
        /* We've wrapped around.  */
2402
        return -1;
2403
    }
2404

    
2405
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2406
    start = start & TARGET_PAGE_MASK;
2407

    
2408
    for (addr = start, len = end - start;
2409
         len != 0;
2410
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2411
        p = page_find(addr >> TARGET_PAGE_BITS);
2412
        if( !p )
2413
            return -1;
2414
        if( !(p->flags & PAGE_VALID) )
2415
            return -1;
2416

    
2417
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2418
            return -1;
2419
        if (flags & PAGE_WRITE) {
2420
            if (!(p->flags & PAGE_WRITE_ORG))
2421
                return -1;
2422
            /* unprotect the page if it was put read-only because it
2423
               contains translated code */
2424
            if (!(p->flags & PAGE_WRITE)) {
2425
                if (!page_unprotect(addr, 0, NULL))
2426
                    return -1;
2427
            }
2428
            return 0;
2429
        }
2430
    }
2431
    return 0;
2432
}
2433

    
2434
/* called from signal handler: invalidate the code and unprotect the
2435
   page. Return TRUE if the fault was successfully handled. */
2436
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2437
{
2438
    unsigned int prot;
2439
    PageDesc *p;
2440
    target_ulong host_start, host_end, addr;
2441

    
2442
    /* Technically this isn't safe inside a signal handler.  However we
2443
       know this only ever happens in a synchronous SEGV handler, so in
2444
       practice it seems to be ok.  */
2445
    mmap_lock();
2446

    
2447
    p = page_find(address >> TARGET_PAGE_BITS);
2448
    if (!p) {
2449
        mmap_unlock();
2450
        return 0;
2451
    }
2452

    
2453
    /* if the page was really writable, then we change its
2454
       protection back to writable */
2455
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2456
        host_start = address & qemu_host_page_mask;
2457
        host_end = host_start + qemu_host_page_size;
2458

    
2459
        prot = 0;
2460
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2461
            p = page_find(addr >> TARGET_PAGE_BITS);
2462
            p->flags |= PAGE_WRITE;
2463
            prot |= p->flags;
2464

    
2465
            /* and since the content will be modified, we must invalidate
2466
               the corresponding translated code. */
2467
            tb_invalidate_phys_page(addr, pc, puc);
2468
#ifdef DEBUG_TB_CHECK
2469
            tb_invalidate_check(addr);
2470
#endif
2471
        }
2472
        mprotect((void *)g2h(host_start), qemu_host_page_size,
2473
                 prot & PAGE_BITS);
2474

    
2475
        mmap_unlock();
2476
        return 1;
2477
    }
2478
    mmap_unlock();
2479
    return 0;
2480
}
2481

    
2482
static inline void tlb_set_dirty(CPUState *env,
2483
                                 unsigned long addr, target_ulong vaddr)
2484
{
2485
}
2486
#endif /* defined(CONFIG_USER_ONLY) */
2487

    
2488
#if !defined(CONFIG_USER_ONLY)
2489

    
2490
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2491
typedef struct subpage_t {
2492
    MemoryRegion iomem;
2493
    target_phys_addr_t base;
2494
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
2495
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
2496
} subpage_t;
2497

    
2498
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2499
                             ram_addr_t memory, ram_addr_t region_offset);
2500
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2501
                                ram_addr_t orig_memory,
2502
                                ram_addr_t region_offset);
2503
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2504
                      need_subpage)                                     \
2505
    do {                                                                \
2506
        if (addr > start_addr)                                          \
2507
            start_addr2 = 0;                                            \
2508
        else {                                                          \
2509
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2510
            if (start_addr2 > 0)                                        \
2511
                need_subpage = 1;                                       \
2512
        }                                                               \
2513
                                                                        \
2514
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2515
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2516
        else {                                                          \
2517
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2518
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2519
                need_subpage = 1;                                       \
2520
        }                                                               \
2521
    } while (0)
2522

    
2523
/* register physical memory.
2524
   For RAM, 'size' must be a multiple of the target page size.
2525
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2526
   io memory page.  The address used when calling the IO function is
2527
   the offset from the start of the region, plus region_offset.  Both
2528
   start_addr and region_offset are rounded down to a page boundary
2529
   before calculating this offset.  This should not be a problem unless
2530
   the low bits of start_addr and region_offset differ.  */
2531
void cpu_register_physical_memory_log(MemoryRegionSection *section,
2532
                                      bool readable, bool readonly)
2533
{
2534
    target_phys_addr_t start_addr = section->offset_within_address_space;
2535
    ram_addr_t size = section->size;
2536
    ram_addr_t phys_offset = section->mr->ram_addr;
2537
    ram_addr_t region_offset = section->offset_within_region;
2538
    target_phys_addr_t addr, end_addr;
2539
    PhysPageDesc *p;
2540
    CPUState *env;
2541
    ram_addr_t orig_size = size;
2542
    subpage_t *subpage;
2543

    
2544
    if (memory_region_is_ram(section->mr)) {
2545
        phys_offset += region_offset;
2546
        region_offset = 0;
2547
    }
2548

    
2549
    if (readonly) {
2550
        phys_offset |= io_mem_rom.ram_addr;
2551
    }
2552

    
2553
    assert(size);
2554

    
2555
    if (phys_offset == io_mem_unassigned.ram_addr) {
2556
        region_offset = start_addr;
2557
    }
2558
    region_offset &= TARGET_PAGE_MASK;
2559
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2560
    end_addr = start_addr + (target_phys_addr_t)size;
2561

    
2562
    addr = start_addr;
2563
    do {
2564
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 0);
2565
        if (p && p->phys_offset != io_mem_unassigned.ram_addr) {
2566
            ram_addr_t orig_memory = p->phys_offset;
2567
            target_phys_addr_t start_addr2, end_addr2;
2568
            int need_subpage = 0;
2569
            MemoryRegion *mr = io_mem_region[orig_memory & ~TARGET_PAGE_MASK];
2570

    
2571
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2572
                          need_subpage);
2573
            if (need_subpage) {
2574
                if (!(mr->subpage)) {
2575
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2576
                                           &p->phys_offset, orig_memory,
2577
                                           p->region_offset);
2578
                } else {
2579
                    subpage = container_of(mr, subpage_t, iomem);
2580
                }
2581
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2582
                                 region_offset);
2583
                p->region_offset = 0;
2584
            } else {
2585
                p->phys_offset = phys_offset;
2586
                p->region_offset = region_offset;
2587
                if (is_ram_rom_romd(phys_offset))
2588
                    phys_offset += TARGET_PAGE_SIZE;
2589
            }
2590
        } else {
2591
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2592
            p->phys_offset = phys_offset;
2593
            p->region_offset = region_offset;
2594
            if (is_ram_rom_romd(phys_offset)) {
2595
                phys_offset += TARGET_PAGE_SIZE;
2596
            } else {
2597
                target_phys_addr_t start_addr2, end_addr2;
2598
                int need_subpage = 0;
2599

    
2600
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2601
                              end_addr2, need_subpage);
2602

    
2603
                if (need_subpage) {
2604
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2605
                                           &p->phys_offset,
2606
                                           io_mem_unassigned.ram_addr,
2607
                                           addr & TARGET_PAGE_MASK);
2608
                    subpage_register(subpage, start_addr2, end_addr2,
2609
                                     phys_offset, region_offset);
2610
                    p->region_offset = 0;
2611
                }
2612
            }
2613
        }
2614
        region_offset += TARGET_PAGE_SIZE;
2615
        addr += TARGET_PAGE_SIZE;
2616
    } while (addr != end_addr);
2617

    
2618
    /* since each CPU stores ram addresses in its TLB cache, we must
2619
       reset the modified entries */
2620
    /* XXX: slow ! */
2621
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2622
        tlb_flush(env, 1);
2623
    }
2624
}
2625

    
2626
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2627
{
2628
    if (kvm_enabled())
2629
        kvm_coalesce_mmio_region(addr, size);
2630
}
2631

    
2632
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2633
{
2634
    if (kvm_enabled())
2635
        kvm_uncoalesce_mmio_region(addr, size);
2636
}
2637

    
2638
void qemu_flush_coalesced_mmio_buffer(void)
2639
{
2640
    if (kvm_enabled())
2641
        kvm_flush_coalesced_mmio_buffer();
2642
}
2643

    
2644
#if defined(__linux__) && !defined(TARGET_S390X)
2645

    
2646
#include <sys/vfs.h>
2647

    
2648
#define HUGETLBFS_MAGIC       0x958458f6
2649

    
2650
static long gethugepagesize(const char *path)
2651
{
2652
    struct statfs fs;
2653
    int ret;
2654

    
2655
    do {
2656
        ret = statfs(path, &fs);
2657
    } while (ret != 0 && errno == EINTR);
2658

    
2659
    if (ret != 0) {
2660
        perror(path);
2661
        return 0;
2662
    }
2663

    
2664
    if (fs.f_type != HUGETLBFS_MAGIC)
2665
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
2666

    
2667
    return fs.f_bsize;
2668
}
2669

    
2670
static void *file_ram_alloc(RAMBlock *block,
2671
                            ram_addr_t memory,
2672
                            const char *path)
2673
{
2674
    char *filename;
2675
    void *area;
2676
    int fd;
2677
#ifdef MAP_POPULATE
2678
    int flags;
2679
#endif
2680
    unsigned long hpagesize;
2681

    
2682
    hpagesize = gethugepagesize(path);
2683
    if (!hpagesize) {
2684
        return NULL;
2685
    }
2686

    
2687
    if (memory < hpagesize) {
2688
        return NULL;
2689
    }
2690

    
2691
    if (kvm_enabled() && !kvm_has_sync_mmu()) {
2692
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2693
        return NULL;
2694
    }
2695

    
2696
    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
2697
        return NULL;
2698
    }
2699

    
2700
    fd = mkstemp(filename);
2701
    if (fd < 0) {
2702
        perror("unable to create backing store for hugepages");
2703
        free(filename);
2704
        return NULL;
2705
    }
2706
    unlink(filename);
2707
    free(filename);
2708

    
2709
    memory = (memory+hpagesize-1) & ~(hpagesize-1);
2710

    
2711
    /*
2712
     * ftruncate is not supported by hugetlbfs in older
2713
     * hosts, so don't bother bailing out on errors.
2714
     * If anything goes wrong with it under other filesystems,
2715
     * mmap will fail.
2716
     */
2717
    if (ftruncate(fd, memory))
2718
        perror("ftruncate");
2719

    
2720
#ifdef MAP_POPULATE
2721
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
2722
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
2723
     * to sidestep this quirk.
2724
     */
2725
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2726
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2727
#else
2728
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2729
#endif
2730
    if (area == MAP_FAILED) {
2731
        perror("file_ram_alloc: can't mmap RAM pages");
2732
        close(fd);
2733
        return (NULL);
2734
    }
2735
    block->fd = fd;
2736
    return area;
2737
}
2738
#endif
2739

    
2740
static ram_addr_t find_ram_offset(ram_addr_t size)
2741
{
2742
    RAMBlock *block, *next_block;
2743
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
2744

    
2745
    if (QLIST_EMPTY(&ram_list.blocks))
2746
        return 0;
2747

    
2748
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2749
        ram_addr_t end, next = RAM_ADDR_MAX;
2750

    
2751
        end = block->offset + block->length;
2752

    
2753
        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2754
            if (next_block->offset >= end) {
2755
                next = MIN(next, next_block->offset);
2756
            }
2757
        }
2758
        if (next - end >= size && next - end < mingap) {
2759
            offset = end;
2760
            mingap = next - end;
2761
        }
2762
    }
2763

    
2764
    if (offset == RAM_ADDR_MAX) {
2765
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2766
                (uint64_t)size);
2767
        abort();
2768
    }
2769

    
2770
    return offset;
2771
}
2772

    
2773
static ram_addr_t last_ram_offset(void)
2774
{
2775
    RAMBlock *block;
2776
    ram_addr_t last = 0;
2777

    
2778
    QLIST_FOREACH(block, &ram_list.blocks, next)
2779
        last = MAX(last, block->offset + block->length);
2780

    
2781
    return last;
2782
}
2783

    
2784
void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
2785
{
2786
    RAMBlock *new_block, *block;
2787

    
2788
    new_block = NULL;
2789
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2790
        if (block->offset == addr) {
2791
            new_block = block;
2792
            break;
2793
        }
2794
    }
2795
    assert(new_block);
2796
    assert(!new_block->idstr[0]);
2797

    
2798
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
2799
        char *id = dev->parent_bus->info->get_dev_path(dev);
2800
        if (id) {
2801
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
2802
            g_free(id);
2803
        }
2804
    }
2805
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2806

    
2807
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2808
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
2809
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2810
                    new_block->idstr);
2811
            abort();
2812
        }
2813
    }
2814
}
2815

    
2816
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2817
                                   MemoryRegion *mr)
2818
{
2819
    RAMBlock *new_block;
2820

    
2821
    size = TARGET_PAGE_ALIGN(size);
2822
    new_block = g_malloc0(sizeof(*new_block));
2823

    
2824
    new_block->mr = mr;
2825
    new_block->offset = find_ram_offset(size);
2826
    if (host) {
2827
        new_block->host = host;
2828
        new_block->flags |= RAM_PREALLOC_MASK;
2829
    } else {
2830
        if (mem_path) {
2831
#if defined (__linux__) && !defined(TARGET_S390X)
2832
            new_block->host = file_ram_alloc(new_block, size, mem_path);
2833
            if (!new_block->host) {
2834
                new_block->host = qemu_vmalloc(size);
2835
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2836
            }
2837
#else
2838
            fprintf(stderr, "-mem-path option unsupported\n");
2839
            exit(1);
2840
#endif
2841
        } else {
2842
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2843
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of the data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
2848
            new_block->host = mmap((void*)0x800000000, size,
2849
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
2850
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
2851
            if (new_block->host == MAP_FAILED) {
2852
                fprintf(stderr, "Allocating RAM failed\n");
2853
                abort();
2854
            }
2855
#else
2856
            if (xen_enabled()) {
2857
                xen_ram_alloc(new_block->offset, size, mr);
2858
            } else {
2859
                new_block->host = qemu_vmalloc(size);
2860
            }
2861
#endif
2862
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
2863
        }
2864
    }
2865
    new_block->length = size;
2866

    
2867
    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2868

    
2869
    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
2870
                                       last_ram_offset() >> TARGET_PAGE_BITS);
2871
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2872
           0xff, size >> TARGET_PAGE_BITS);
2873

    
2874
    if (kvm_enabled())
2875
        kvm_setup_guest_memory(new_block->host, size);
2876

    
2877
    return new_block->offset;
2878
}
2879

    
2880
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
2881
{
2882
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
2883
}
2884

    
2885
void qemu_ram_free_from_ptr(ram_addr_t addr)
2886
{
2887
    RAMBlock *block;
2888

    
2889
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2890
        if (addr == block->offset) {
2891
            QLIST_REMOVE(block, next);
2892
            g_free(block);
2893
            return;
2894
        }
2895
    }
2896
}
2897

    
2898
void qemu_ram_free(ram_addr_t addr)
2899
{
2900
    RAMBlock *block;
2901

    
2902
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2903
        if (addr == block->offset) {
2904
            QLIST_REMOVE(block, next);
2905
            if (block->flags & RAM_PREALLOC_MASK) {
2906
                ;
2907
            } else if (mem_path) {
2908
#if defined (__linux__) && !defined(TARGET_S390X)
2909
                if (block->fd) {
2910
                    munmap(block->host, block->length);
2911
                    close(block->fd);
2912
                } else {
2913
                    qemu_vfree(block->host);
2914
                }
2915
#else
2916
                abort();
2917
#endif
2918
            } else {
2919
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2920
                munmap(block->host, block->length);
2921
#else
2922
                if (xen_enabled()) {
2923
                    xen_invalidate_map_cache_entry(block->host);
2924
                } else {
2925
                    qemu_vfree(block->host);
2926
                }
2927
#endif
2928
            }
2929
            g_free(block);
2930
            return;
2931
        }
2932
    }
2933

    
2934
}
2935

    
2936
#ifndef _WIN32
2937
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2938
{
2939
    RAMBlock *block;
2940
    ram_addr_t offset;
2941
    int flags;
2942
    void *area, *vaddr;
2943

    
2944
    QLIST_FOREACH(block, &ram_list.blocks, next) {
2945
        offset = addr - block->offset;
2946
        if (offset < block->length) {
2947
            vaddr = block->host + offset;
2948
            if (block->flags & RAM_PREALLOC_MASK) {
2949
                ;
2950
            } else {
2951
                flags = MAP_FIXED;
2952
                munmap(vaddr, length);
2953
                if (mem_path) {
2954
#if defined(__linux__) && !defined(TARGET_S390X)
2955
                    if (block->fd) {
2956
#ifdef MAP_POPULATE
2957
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2958
                            MAP_PRIVATE;
2959
#else
2960
                        flags |= MAP_PRIVATE;
2961
#endif
2962
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2963
                                    flags, block->fd, offset);
2964
                    } else {
2965
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2966
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2967
                                    flags, -1, 0);
2968
                    }
2969
#else
2970
                    abort();
2971
#endif
2972
                } else {
2973
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2974
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
2975
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2976
                                flags, -1, 0);
2977
#else
2978
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2979
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2980
                                flags, -1, 0);
2981
#endif
2982
                }
2983
                if (area != vaddr) {
2984
                    fprintf(stderr, "Could not remap addr: "
2985
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
2986
                            length, addr);
2987
                    exit(1);
2988
                }
2989
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2990
            }
2991
            return;
2992
        }
2993
    }
2994
}
2995
#endif /* !_WIN32 */
2996

    
2997
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2998
   With the exception of the softmmu code in this file, this should
2999
   only be used for local memory (e.g. video ram) that the device owns,
3000
   and knows it isn't going to access beyond the end of the block.
3001

3002
   It should not be used for general purpose DMA.
3003
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3004
 */
3005
void *qemu_get_ram_ptr(ram_addr_t addr)
3006
{
3007
    RAMBlock *block;
3008

    
3009
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3010
        if (addr - block->offset < block->length) {
3011
            /* Move this entry to the start of the list.  */
3012
            if (block != QLIST_FIRST(&ram_list.blocks)) {
3013
                QLIST_REMOVE(block, next);
3014
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
3015
            }
3016
            if (xen_enabled()) {
3017
                /* We need to check if the requested address is in the RAM
3018
                 * because we don't want to map the entire memory in QEMU.
3019
                 * In that case just map until the end of the page.
3020
                 */
3021
                if (block->offset == 0) {
3022
                    return xen_map_cache(addr, 0, 0);
3023
                } else if (block->host == NULL) {
3024
                    block->host =
3025
                        xen_map_cache(block->offset, block->length, 1);
3026
                }
3027
            }
3028
            return block->host + (addr - block->offset);
3029
        }
3030
    }
3031

    
3032
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3033
    abort();
3034

    
3035
    return NULL;
3036
}
3037

    
3038
/* Return a host pointer to ram allocated with qemu_ram_alloc.
3039
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3040
 */
3041
void *qemu_safe_ram_ptr(ram_addr_t addr)
3042
{
3043
    RAMBlock *block;
3044

    
3045
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3046
        if (addr - block->offset < block->length) {
3047
            if (xen_enabled()) {
3048
                /* We need to check if the requested address is in the RAM
3049
                 * because we don't want to map the entire memory in QEMU.
3050
                 * In that case just map until the end of the page.
3051
                 */
3052
                if (block->offset == 0) {
3053
                    return xen_map_cache(addr, 0, 0);
3054
                } else if (block->host == NULL) {
3055
                    block->host =
3056
                        xen_map_cache(block->offset, block->length, 1);
3057
                }
3058
            }
3059
            return block->host + (addr - block->offset);
3060
        }
3061
    }
3062

    
3063
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3064
    abort();
3065

    
3066
    return NULL;
3067
}
3068

    
3069
/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3070
 * but takes a size argument */
3071
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
3072
{
3073
    if (*size == 0) {
3074
        return NULL;
3075
    }
3076
    if (xen_enabled()) {
3077
        return xen_map_cache(addr, *size, 1);
3078
    } else {
3079
        RAMBlock *block;
3080

    
3081
        QLIST_FOREACH(block, &ram_list.blocks, next) {
3082
            if (addr - block->offset < block->length) {
3083
                if (addr - block->offset + *size > block->length)
3084
                    *size = block->length - addr + block->offset;
3085
                return block->host + (addr - block->offset);
3086
            }
3087
        }
3088

    
3089
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
3090
        abort();
3091
    }
3092
}
3093

    
3094
void qemu_put_ram_ptr(void *addr)
3095
{
3096
    trace_qemu_put_ram_ptr(addr);
3097
}
3098

    
3099
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
3100
{
3101
    RAMBlock *block;
3102
    uint8_t *host = ptr;
3103

    
3104
    if (xen_enabled()) {
3105
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
3106
        return 0;
3107
    }
3108

    
3109
    QLIST_FOREACH(block, &ram_list.blocks, next) {
3110
        /* This case appears when the block is not mapped. */
3111
        if (block->host == NULL) {
3112
            continue;
3113
        }
3114
        if (host - block->host < block->length) {
3115
            *ram_addr = block->offset + (host - block->host);
3116
            return 0;
3117
        }
3118
    }
3119

    
3120
    return -1;
3121
}
3122

    
3123
/* Some of the softmmu routines need to translate from a host pointer
3124
   (typically a TLB entry) back to a ram offset.  */
3125
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3126
{
3127
    ram_addr_t ram_addr;
3128

    
3129
    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3130
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
3131
        abort();
3132
    }
3133
    return ram_addr;
3134
}
3135

    
3136
static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
3137
                                    unsigned size)
3138
{
3139
#ifdef DEBUG_UNASSIGNED
3140
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
3141
#endif
3142
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3143
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
3144
#endif
3145
    return 0;
3146
}
3147

    
3148
static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
3149
                                 uint64_t val, unsigned size)
3150
{
3151
#ifdef DEBUG_UNASSIGNED
3152
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
3153
#endif
3154
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
3155
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
3156
#endif
3157
}
3158

    
3159
static const MemoryRegionOps unassigned_mem_ops = {
3160
    .read = unassigned_mem_read,
3161
    .write = unassigned_mem_write,
3162
    .endianness = DEVICE_NATIVE_ENDIAN,
3163
};
3164

    
3165
static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
3166
                               unsigned size)
3167
{
3168
    abort();
3169
}
3170

    
3171
static void error_mem_write(void *opaque, target_phys_addr_t addr,
3172
                            uint64_t value, unsigned size)
3173
{
3174
    abort();
3175
}
3176

    
3177
static const MemoryRegionOps error_mem_ops = {
3178
    .read = error_mem_read,
3179
    .write = error_mem_write,
3180
    .endianness = DEVICE_NATIVE_ENDIAN,
3181
};
3182

    
3183
static const MemoryRegionOps rom_mem_ops = {
3184
    .read = error_mem_read,
3185
    .write = unassigned_mem_write,
3186
    .endianness = DEVICE_NATIVE_ENDIAN,
3187
};
3188

    
3189
static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
3190
                               uint64_t val, unsigned size)
3191
{
3192
    int dirty_flags;
3193
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3194
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
3195
#if !defined(CONFIG_USER_ONLY)
3196
        tb_invalidate_phys_page_fast(ram_addr, size);
3197
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
3198
#endif
3199
    }
3200
    switch (size) {
3201
    case 1:
3202
        stb_p(qemu_get_ram_ptr(ram_addr), val);
3203
        break;
3204
    case 2:
3205
        stw_p(qemu_get_ram_ptr(ram_addr), val);
3206
        break;
3207
    case 4:
3208
        stl_p(qemu_get_ram_ptr(ram_addr), val);
3209
        break;
3210
    default:
3211
        abort();
3212
    }
3213
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
3214
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
3215
    /* we remove the notdirty callback only if the code has been
3216
       flushed */
3217
    if (dirty_flags == 0xff)
3218
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
3219
}
3220

    
3221
static const MemoryRegionOps notdirty_mem_ops = {
3222
    .read = error_mem_read,
3223
    .write = notdirty_mem_write,
3224
    .endianness = DEVICE_NATIVE_ENDIAN,
3225
};
3226

    
3227
/* Generate a debug exception if a watchpoint has been hit.  */
3228
static void check_watchpoint(int offset, int len_mask, int flags)
3229
{
3230
    CPUState *env = cpu_single_env;
3231
    target_ulong pc, cs_base;
3232
    TranslationBlock *tb;
3233
    target_ulong vaddr;
3234
    CPUWatchpoint *wp;
3235
    int cpu_flags;
3236

    
3237
    if (env->watchpoint_hit) {
3238
        /* We re-entered the check after replacing the TB. Now raise
3239
         * the debug interrupt so that is will trigger after the
3240
         * current instruction. */
3241
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
3242
        return;
3243
    }
3244
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
3245
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
3246
        if ((vaddr == (wp->vaddr & len_mask) ||
3247
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
3248
            wp->flags |= BP_WATCHPOINT_HIT;
3249
            if (!env->watchpoint_hit) {
3250
                env->watchpoint_hit = wp;
3251
                tb = tb_find_pc(env->mem_io_pc);
3252
                if (!tb) {
3253
                    cpu_abort(env, "check_watchpoint: could not find TB for "
3254
                              "pc=%p", (void *)env->mem_io_pc);
3255
                }
3256
                cpu_restore_state(tb, env, env->mem_io_pc);
3257
                tb_phys_invalidate(tb, -1);
3258
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
3259
                    env->exception_index = EXCP_DEBUG;
3260
                } else {
3261
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
3262
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
3263
                }
3264
                cpu_resume_from_signal(env, NULL);
3265
            }
3266
        } else {
3267
            wp->flags &= ~BP_WATCHPOINT_HIT;
3268
        }
3269
    }
3270
}
3271

    
3272
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

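/* Subpage handling: when a single target page is split between several
   backends, accesses go through a subpage_t that records one io index
   and region offset per SUBPAGE_IDX slot; subpage_read/subpage_write
   simply forward the access to the handler registered for the slot that
   was hit. */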
static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read(idx, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write(idx, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == io_mem_ram.ram_addr) {
        memory = io_mem_subpage_ram.ram_addr;
    }
    memory &= IO_MEM_NB_ENTRIES - 1;
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

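/* Allocate a subpage container for the page at 'base', back the whole
   page with 'orig_memory' initially, and return the new io index through
   *phys so the caller can plug it into the physical page mappings. */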
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
    subpage_memory = mmio->iomem.ram_addr;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}

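/* Find an unused slot in the io_mem_* tables and mark it allocated.
   Returns -1 when all IO_MEM_NB_ENTRIES slots are in use. */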
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* Register a MemoryRegion's access callbacks in the io_mem_* tables.
   If io_index is positive, the corresponding entry is modified (it must
   be below IO_MEM_NB_ENTRIES); if it is zero or negative, a new entry is
   allocated.  The returned index can be used with
   cpu_register_physical_memory(); -1 is returned on error. */
static int cpu_register_io_memory_fixed(int io_index, MemoryRegion *mr)
{
    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    io_mem_region[io_index] = mr;

    return io_index;
}

int cpu_register_io_memory(MemoryRegion *mr)
{
    return cpu_register_io_memory_fixed(0, mr);
}

void cpu_unregister_io_memory(int io_index)
{
    io_mem_region[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

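/* Initialize the fixed io_mem_* regions.  The code relies on the first
   five regions (ram, rom, unassigned, notdirty, subpage-ram) ending up in
   slots 0-4, hence the assert on io_mem_ram and the manual marking of
   io_mem_used[0..4]; io_mem_watch is registered last and takes whatever
   slot is free. */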
static void io_mem_init(void)
{
    int i;

    /* Must be first: */
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    assert(io_mem_ram.ram_addr == 0);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
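/* Copy 'len' bytes between a guest physical range and 'buf', splitting
   the transfer at page boundaries.  RAM pages are memcpy'd directly (with
   TB invalidation and dirty tracking on writes); MMIO pages go through
   io_mem_read/io_mem_write in the widest naturally aligned chunks. */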
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
                target_phys_addr_t addr1;
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(io_index, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(io_index, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(io_index, addr1, val, 1);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!is_ram_rom_romd(pd)) {
                target_phys_addr_t addr1;
                /* I/O case */
                io_index = pd & (IO_MEM_NB_ENTRIES - 1);
                addr1 = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(io_index, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(io_index, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(io_index, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if (!is_ram_rom_romd(pd)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

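/* Bounce buffer used by cpu_physical_memory_map() when the requested
   range is not plain RAM: the data is staged in a temporary page-sized
   buffer and written back (if needed) by cpu_physical_memory_unmap().
   Only one bounce buffer exists; MapClient callbacks notify waiters when
   it is released. */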
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        pd = p.phys_offset;

        if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

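/* Typical map/unmap usage (illustrative sketch only, not code from this
 * file; 'dma_addr', 'dma_len', 'opaque' and 'retry_fn' are hypothetical
 * caller variables):
 *
 *     target_phys_addr_t plen = dma_len;
 *     void *host = cpu_physical_memory_map(dma_addr, &plen, 1);
 *     if (host) {
 *         memset(host, 0, plen);          // device fills the buffer
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     } else {
 *         // no bounce buffer free: register a MapClient and retry later
 *         cpu_register_map_client(opaque, retry_fn);
 *     }
 *
 * Note that *plen may come back smaller than requested, so callers must
 * loop until the whole transfer has been covered. */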
/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;

        /* XXX This is broken when device endian != cpu endian.
               Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(io_index, addr, 4) << 32;
        val |= io_mem_read(io_index, addr + 4, 4);
#else
        val = io_mem_read(io_index, addr, 4);
        val |= io_mem_read(io_index, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if (!is_ram_rom_romd(pd)) {
        /* I/O case */
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        val = io_mem_read(io_index, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(io_index, addr, val >> 32, 4);
        io_mem_write(io_index, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(io_index, addr, (uint32_t)val, 4);
        io_mem_write(io_index, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(io_index, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    pd = p.phys_offset;

    if ((pd & ~TARGET_PAGE_MASK) != io_mem_ram.ram_addr) {
        io_index = pd & (IO_MEM_NB_ENTRIES - 1);
        addr = (addr & ~TARGET_PAGE_MASK) + p.region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(io_index, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

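/* Dump translation-buffer statistics (code cache usage, TB counts and
   sizes, cross-page, direct-jump and flush counters) to the given stream,
   then let TCG append its own counters. */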
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
tb_page_addr_t get_page_addr_code(CPUState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        ldub_code(addr);
    }
    pd = env1->tlb_table[mmu_idx][page_index].addr_code & ~TARGET_PAGE_MASK;
    if (pd != io_mem_ram.ram_addr && pd != io_mem_rom.ram_addr
        && !is_romd(pd)) {
#if defined(TARGET_ALPHA) || defined(TARGET_MIPS) || defined(TARGET_SPARC)
        cpu_unassigned_access(env1, addr, 0, 1, 0, 4);
#else
        cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x" TARGET_FMT_lx "\n", addr);
#endif
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

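/* Instantiate the code-access (_cmmu) softmmu load helpers by including
   softmmu_template.h once per access size: SHIFT selects 1, 2, 4 or 8
   byte accesses, SOFTMMU_CODE_ACCESS requests the code-read variants,
   GETPC() is stubbed out and env is temporarily aliased to
   cpu_single_env since this is not generated code. */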
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif