root / exec.c @ 7b8f3b78
1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36
#include "qemu-common.h"
37
#include "tcg.h"
38
#include "hw/hw.h"
39
#include "osdep.h"
40
#include "kvm.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#include <signal.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
#define TARGET_PHYS_ADDR_SPACE_BITS 32
80
#endif
81

    
82
static TranslationBlock *tbs;
83
int code_gen_max_blocks;
84
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
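/* Note: tb_phys_hash is keyed on the guest physical address of a TB's first
   instruction, so its entries stay valid across MMU mode changes; each CPU
   also keeps a small virtual-PC-indexed tb_jmp_cache that is simply cleared
   whenever it might have become stale. */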
85
static int nb_tbs;
86
/* any access to the tbs or the page table must use this lock */
87
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88

    
89
#if defined(__arm__) || defined(__sparc_v9__)
90
/* The prologue must be reachable with a direct jump. ARM and Sparc64
91
 have limited branch ranges (possibly also PPC) so place it in a
92
 section close to the code segment. */
93
#define code_gen_section                                \
94
    __attribute__((__section__(".gen_code")))           \
95
    __attribute__((aligned (32)))
96
#elif defined(_WIN32)
97
/* Maximum alignment for Win32 is 16. */
98
#define code_gen_section                                \
99
    __attribute__((aligned (16)))
100
#else
101
#define code_gen_section                                \
102
    __attribute__((aligned (32)))
103
#endif
104

    
105
uint8_t code_gen_prologue[1024] code_gen_section;
106
static uint8_t *code_gen_buffer;
107
static unsigned long code_gen_buffer_size;
108
/* threshold to flush the translated code buffer */
109
static unsigned long code_gen_buffer_max_size;
110
uint8_t *code_gen_ptr;
111

    
112
#if !defined(CONFIG_USER_ONLY)
113
int phys_ram_fd;
114
uint8_t *phys_ram_dirty;
115
static int in_migration;
116

    
117
typedef struct RAMBlock {
118
    uint8_t *host;
119
    ram_addr_t offset;
120
    ram_addr_t length;
121
    struct RAMBlock *next;
122
} RAMBlock;
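/* Guest RAM is handed out from a single flat ram_addr_t space: each RAMBlock
   records the host pointer backing one chunk together with its offset and
   length in that space. */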
123

    
124
static RAMBlock *ram_blocks;
125
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126
   then we can no longer assume contiguous ram offsets, and external uses
127
   of this variable will break.  */
128
ram_addr_t last_ram_offset;
129
#endif
130

    
131
CPUState *first_cpu;
132
/* current CPU in the current thread. It is only valid inside
133
   cpu_exec() */
134
CPUState *cpu_single_env;
135
/* 0 = Do not count executed instructions.
136
   1 = Precise instruction counting.
137
   2 = Adaptive rate instruction counting.  */
138
int use_icount = 0;
139
/* Current instruction counter.  While executing translated code this may
140
   include some instructions that have not yet been executed.  */
141
int64_t qemu_icount;
142

    
143
typedef struct PageDesc {
144
    /* list of TBs intersecting this ram page */
145
    TranslationBlock *first_tb;
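    /* first_tb and the page_next[] links it heads are tagged pointers: the
       low two bits hold which slot (0 or 1) of the TB's page_addr[] this
       page occupies, so list walkers mask with ~3 before dereferencing. */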
146
    /* in order to optimize self modifying code, we count the number
147
       of write invalidations on a given page, switching to a bitmap past a threshold */
148
    unsigned int code_write_count;
149
    uint8_t *code_bitmap;
150
#if defined(CONFIG_USER_ONLY)
151
    unsigned long flags;
152
#endif
153
} PageDesc;
154

    
155
typedef struct PhysPageDesc {
156
    /* offset in host memory of the page + io_index in the low bits */
157
    ram_addr_t phys_offset;
158
    ram_addr_t region_offset;
159
} PhysPageDesc;
160

    
161
#define L2_BITS 10
162
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163
/* XXX: this is a temporary hack for alpha target.
164
 *      In the future, this is to be replaced by a multi-level table
165
 *      to actually be able to handle the complete 64 bits address space.
166
 */
167
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
168
#else
169
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170
#endif
171

    
172
#define L1_SIZE (1 << L1_BITS)
173
#define L2_SIZE (1 << L2_BITS)
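/* Example of the resulting split, assuming a 4 KB target page
   (TARGET_PAGE_BITS == 12) and 32-bit virtual addresses: the page index
   "addr >> TARGET_PAGE_BITS" is 20 bits wide; its top L1_BITS (10) select an
   l1_map entry and its low L2_BITS (10) select a PageDesc within the
   level-2 array that is allocated on demand. */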
174

    
175
unsigned long qemu_real_host_page_size;
176
unsigned long qemu_host_page_bits;
177
unsigned long qemu_host_page_size;
178
unsigned long qemu_host_page_mask;
179

    
180
/* XXX: for system emulation, it could just be an array */
181
static PageDesc *l1_map[L1_SIZE];
182
static PhysPageDesc **l1_phys_map;
183

    
184
#if !defined(CONFIG_USER_ONLY)
185
static void io_mem_init(void);
186

    
187
/* io memory support */
188
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
189
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
190
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
191
static char io_mem_used[IO_MEM_NB_ENTRIES];
192
static int io_mem_watch;
193
#endif
194

    
195
/* log support */
196
#ifdef WIN32
197
static const char *logfilename = "qemu.log";
198
#else
199
static const char *logfilename = "/tmp/qemu.log";
200
#endif
201
FILE *logfile;
202
int loglevel;
203
static int log_append = 0;
204

    
205
/* statistics */
206
static int tlb_flush_count;
207
static int tb_flush_count;
208
static int tb_phys_invalidate_count;
209

    
210
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
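/* A subpage describes a target page that is not served by a single io_mem
   handler: accesses are dispatched on the offset within the page
   (SUBPAGE_IDX) to the per-offset read/write callbacks registered below. */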
211
typedef struct subpage_t {
212
    target_phys_addr_t base;
213
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
214
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
215
    void *opaque[TARGET_PAGE_SIZE][2][4];
216
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
217
} subpage_t;
218

    
219
#ifdef _WIN32
220
static void map_exec(void *addr, long size)
221
{
222
    DWORD old_protect;
223
    VirtualProtect(addr, size,
224
                   PAGE_EXECUTE_READWRITE, &old_protect);
225
    
226
}
227
#else
228
static void map_exec(void *addr, long size)
229
{
230
    unsigned long start, end, page_size;
231
    
232
    page_size = getpagesize();
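    /* round the [addr, addr + size) range out to host page boundaries:
       mprotect() requires a page-aligned starting address */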
233
    start = (unsigned long)addr;
234
    start &= ~(page_size - 1);
235
    
236
    end = (unsigned long)addr + size;
237
    end += page_size - 1;
238
    end &= ~(page_size - 1);
239
    
240
    mprotect((void *)start, end - start,
241
             PROT_READ | PROT_WRITE | PROT_EXEC);
242
}
243
#endif
244

    
245
static void page_init(void)
246
{
247
    /* NOTE: we can always assume that qemu_host_page_size >=
248
       TARGET_PAGE_SIZE */
249
#ifdef _WIN32
250
    {
251
        SYSTEM_INFO system_info;
252

    
253
        GetSystemInfo(&system_info);
254
        qemu_real_host_page_size = system_info.dwPageSize;
255
    }
256
#else
257
    qemu_real_host_page_size = getpagesize();
258
#endif
259
    if (qemu_host_page_size == 0)
260
        qemu_host_page_size = qemu_real_host_page_size;
261
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
262
        qemu_host_page_size = TARGET_PAGE_SIZE;
263
    qemu_host_page_bits = 0;
264
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
265
        qemu_host_page_bits++;
266
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
267
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
268
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
269

    
270
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
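    /* In user-mode emulation, mark every mapping the host process already
       owns (as listed in /proc/self/maps) as PAGE_RESERVED, so that guest
       mmap() requests are not satisfied from ranges the host is using. */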
271
    {
272
        long long startaddr, endaddr;
273
        FILE *f;
274
        int n;
275

    
276
        mmap_lock();
277
        last_brk = (unsigned long)sbrk(0);
278
        f = fopen("/proc/self/maps", "r");
279
        if (f) {
280
            do {
281
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
282
                if (n == 2) {
283
                    startaddr = MIN(startaddr,
284
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
285
                    endaddr = MIN(endaddr,
286
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
287
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
288
                                   TARGET_PAGE_ALIGN(endaddr),
289
                                   PAGE_RESERVED); 
290
                }
291
            } while (!feof(f));
292
            fclose(f);
293
        }
294
        mmap_unlock();
295
    }
296
#endif
297
}
298

    
299
static inline PageDesc **page_l1_map(target_ulong index)
300
{
301
#if TARGET_LONG_BITS > 32
302
    /* Host memory outside guest VM.  For 32-bit targets we have already
303
       excluded high addresses.  */
304
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
305
        return NULL;
306
#endif
307
    return &l1_map[index >> L2_BITS];
308
}
309

    
310
static inline PageDesc *page_find_alloc(target_ulong index)
311
{
312
    PageDesc **lp, *p;
313
    lp = page_l1_map(index);
314
    if (!lp)
315
        return NULL;
316

    
317
    p = *lp;
318
    if (!p) {
319
        /* allocate if not found */
320
#if defined(CONFIG_USER_ONLY)
321
        size_t len = sizeof(PageDesc) * L2_SIZE;
322
        /* Don't use qemu_malloc because it may recurse.  */
323
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
324
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
325
        *lp = p;
326
        if (h2g_valid(p)) {
327
            unsigned long addr = h2g(p);
328
            page_set_flags(addr & TARGET_PAGE_MASK,
329
                           TARGET_PAGE_ALIGN(addr + len),
330
                           PAGE_RESERVED); 
331
        }
332
#else
333
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
334
        *lp = p;
335
#endif
336
    }
337
    return p + (index & (L2_SIZE - 1));
338
}
339

    
340
static inline PageDesc *page_find(target_ulong index)
341
{
342
    PageDesc **lp, *p;
343
    lp = page_l1_map(index);
344
    if (!lp)
345
        return NULL;
346

    
347
    p = *lp;
348
    if (!p) {
349
        return NULL;
350
    }
351
    return p + (index & (L2_SIZE - 1));
352
}
353

    
354
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
355
{
356
    void **lp, **p;
357
    PhysPageDesc *pd;
358

    
359
    p = (void **)l1_phys_map;
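    /* l1_phys_map is a single-level table when the physical address space
       fits in 32 bits, and gains one extra top level (indexed by the page
       index bits above L1_BITS + L2_BITS) otherwise. */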
360
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
361

    
362
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
364
#endif
365
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
366
    p = *lp;
367
    if (!p) {
368
        /* allocate if not found */
369
        if (!alloc)
370
            return NULL;
371
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
372
        memset(p, 0, sizeof(void *) * L1_SIZE);
373
        *lp = p;
374
    }
375
#endif
376
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
377
    pd = *lp;
378
    if (!pd) {
379
        int i;
380
        /* allocate if not found */
381
        if (!alloc)
382
            return NULL;
383
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
384
        *lp = pd;
385
        for (i = 0; i < L2_SIZE; i++) {
386
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
387
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
388
        }
389
    }
390
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
391
}
392

    
393
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
394
{
395
    return phys_page_find_alloc(index, 0);
396
}
397

    
398
#if !defined(CONFIG_USER_ONLY)
399
static void tlb_protect_code(ram_addr_t ram_addr);
400
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
401
                                    target_ulong vaddr);
402
#define mmap_lock() do { } while(0)
403
#define mmap_unlock() do { } while(0)
404
#endif
405

    
406
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
407

    
408
#if defined(CONFIG_USER_ONLY)
409
/* Currently it is not recommended to allocate big chunks of data in
410
   user mode. This will change once a dedicated libc is used */
411
#define USE_STATIC_CODE_GEN_BUFFER
412
#endif
413

    
414
#ifdef USE_STATIC_CODE_GEN_BUFFER
415
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
416
#endif
417

    
418
static void code_gen_alloc(unsigned long tb_size)
419
{
420
#ifdef USE_STATIC_CODE_GEN_BUFFER
421
    code_gen_buffer = static_code_gen_buffer;
422
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423
    map_exec(code_gen_buffer, code_gen_buffer_size);
424
#else
425
    code_gen_buffer_size = tb_size;
426
    if (code_gen_buffer_size == 0) {
427
#if defined(CONFIG_USER_ONLY)
428
        /* in user mode, phys_ram_size is not meaningful */
429
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
430
#else
431
        /* XXX: needs adjustments */
432
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
433
#endif
434
    }
435
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
436
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
437
    /* The code gen buffer location may have constraints depending on
438
       the host cpu and OS */
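    /* On hosts with a limited direct-branch range (x86-64 via MAP_32BIT,
       sparc64, ARM) the buffer is placed low and capped in size below, so
       that generated code can reach the prologue and other TBs with direct
       jumps. */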
439
#if defined(__linux__) 
440
    {
441
        int flags;
442
        void *start = NULL;
443

    
444
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
445
#if defined(__x86_64__)
446
        flags |= MAP_32BIT;
447
        /* Cannot map more than that */
448
        if (code_gen_buffer_size > (800 * 1024 * 1024))
449
            code_gen_buffer_size = (800 * 1024 * 1024);
450
#elif defined(__sparc_v9__)
451
        // Map the buffer below 2G, so we can use direct calls and branches
452
        flags |= MAP_FIXED;
453
        start = (void *) 0x60000000UL;
454
        if (code_gen_buffer_size > (512 * 1024 * 1024))
455
            code_gen_buffer_size = (512 * 1024 * 1024);
456
#elif defined(__arm__)
457
        /* Map the buffer below 32M, so we can use direct calls and branches */
458
        flags |= MAP_FIXED;
459
        start = (void *) 0x01000000UL;
460
        if (code_gen_buffer_size > 16 * 1024 * 1024)
461
            code_gen_buffer_size = 16 * 1024 * 1024;
462
#endif
463
        code_gen_buffer = mmap(start, code_gen_buffer_size,
464
                               PROT_WRITE | PROT_READ | PROT_EXEC,
465
                               flags, -1, 0);
466
        if (code_gen_buffer == MAP_FAILED) {
467
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
468
            exit(1);
469
        }
470
    }
471
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
472
    {
473
        int flags;
474
        void *addr = NULL;
475
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
476
#if defined(__x86_64__)
477
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
478
         * 0x40000000 is free */
479
        flags |= MAP_FIXED;
480
        addr = (void *)0x40000000;
481
        /* Cannot map more than that */
482
        if (code_gen_buffer_size > (800 * 1024 * 1024))
483
            code_gen_buffer_size = (800 * 1024 * 1024);
484
#endif
485
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
486
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
487
                               flags, -1, 0);
488
        if (code_gen_buffer == MAP_FAILED) {
489
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
490
            exit(1);
491
        }
492
    }
493
#else
494
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
495
    map_exec(code_gen_buffer, code_gen_buffer_size);
496
#endif
497
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
498
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
499
    code_gen_buffer_max_size = code_gen_buffer_size - 
500
        code_gen_max_block_size();
501
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
502
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
503
}
504

    
505
/* Must be called before using the QEMU cpus. 'tb_size' is the size
506
   (in bytes) allocated to the translation buffer. Zero means default
507
   size. */
508
void cpu_exec_init_all(unsigned long tb_size)
509
{
510
    cpu_gen_init();
511
    code_gen_alloc(tb_size);
512
    code_gen_ptr = code_gen_buffer;
513
    page_init();
514
#if !defined(CONFIG_USER_ONLY)
515
    io_mem_init();
516
#endif
517
}
518

    
519
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
520

    
521
static void cpu_common_pre_save(void *opaque)
522
{
523
    CPUState *env = opaque;
524

    
525
    cpu_synchronize_state(env);
526
}
527

    
528
static int cpu_common_pre_load(void *opaque)
529
{
530
    CPUState *env = opaque;
531

    
532
    cpu_synchronize_state(env);
533
    return 0;
534
}
535

    
536
static int cpu_common_post_load(void *opaque, int version_id)
537
{
538
    CPUState *env = opaque;
539

    
540
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
541
       version_id is increased. */
542
    env->interrupt_request &= ~0x01;
543
    tlb_flush(env, 1);
544

    
545
    return 0;
546
}
547

    
548
static const VMStateDescription vmstate_cpu_common = {
549
    .name = "cpu_common",
550
    .version_id = 1,
551
    .minimum_version_id = 1,
552
    .minimum_version_id_old = 1,
553
    .pre_save = cpu_common_pre_save,
554
    .pre_load = cpu_common_pre_load,
555
    .post_load = cpu_common_post_load,
556
    .fields      = (VMStateField []) {
557
        VMSTATE_UINT32(halted, CPUState),
558
        VMSTATE_UINT32(interrupt_request, CPUState),
559
        VMSTATE_END_OF_LIST()
560
    }
561
};
562
#endif
563

    
564
CPUState *qemu_get_cpu(int cpu)
565
{
566
    CPUState *env = first_cpu;
567

    
568
    while (env) {
569
        if (env->cpu_index == cpu)
570
            break;
571
        env = env->next_cpu;
572
    }
573

    
574
    return env;
575
}
576

    
577
void cpu_exec_init(CPUState *env)
578
{
579
    CPUState **penv;
580
    int cpu_index;
581

    
582
#if defined(CONFIG_USER_ONLY)
583
    cpu_list_lock();
584
#endif
585
    env->next_cpu = NULL;
586
    penv = &first_cpu;
587
    cpu_index = 0;
588
    while (*penv != NULL) {
589
        penv = &(*penv)->next_cpu;
590
        cpu_index++;
591
    }
592
    env->cpu_index = cpu_index;
593
    env->numa_node = 0;
594
    QTAILQ_INIT(&env->breakpoints);
595
    QTAILQ_INIT(&env->watchpoints);
596
    *penv = env;
597
#if defined(CONFIG_USER_ONLY)
598
    cpu_list_unlock();
599
#endif
600
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
601
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
602
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
603
                    cpu_save, cpu_load, env);
604
#endif
605
}
606

    
607
static inline void invalidate_page_bitmap(PageDesc *p)
608
{
609
    if (p->code_bitmap) {
610
        qemu_free(p->code_bitmap);
611
        p->code_bitmap = NULL;
612
    }
613
    p->code_write_count = 0;
614
}
615

    
616
/* set the 'first_tb' field to NULL in all PageDescs */
617
static void page_flush_tb(void)
618
{
619
    int i, j;
620
    PageDesc *p;
621

    
622
    for(i = 0; i < L1_SIZE; i++) {
623
        p = l1_map[i];
624
        if (p) {
625
            for(j = 0; j < L2_SIZE; j++) {
626
                p->first_tb = NULL;
627
                invalidate_page_bitmap(p);
628
                p++;
629
            }
630
        }
631
    }
632
}
633

    
634
/* flush all the translation blocks */
635
/* XXX: tb_flush is currently not thread safe */
636
void tb_flush(CPUState *env1)
637
{
638
    CPUState *env;
639
#if defined(DEBUG_FLUSH)
640
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
641
           (unsigned long)(code_gen_ptr - code_gen_buffer),
642
           nb_tbs, nb_tbs > 0 ?
643
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
644
#endif
645
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
646
        cpu_abort(env1, "Internal error: code buffer overflow\n");
647

    
648
    nb_tbs = 0;
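    /* every TB is now stale: clear each CPU's tb_jmp_cache and the physical
       hash table, and unlink all TBs from their PageDescs before reusing
       the code buffer */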
649

    
650
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
651
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
652
    }
653

    
654
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
655
    page_flush_tb();
656

    
657
    code_gen_ptr = code_gen_buffer;
658
    /* XXX: flush processor icache at this point if cache flush is
659
       expensive */
660
    tb_flush_count++;
661
}
662

    
663
#ifdef DEBUG_TB_CHECK
664

    
665
static void tb_invalidate_check(target_ulong address)
666
{
667
    TranslationBlock *tb;
668
    int i;
669
    address &= TARGET_PAGE_MASK;
670
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
671
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
672
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
673
                  address >= tb->pc + tb->size)) {
674
                printf("ERROR invalidate: address=" TARGET_FMT_lx
675
                       " PC=%08lx size=%04x\n",
676
                       address, (long)tb->pc, tb->size);
677
            }
678
        }
679
    }
680
}
681

    
682
/* verify that all the pages have correct rights for code */
683
static void tb_page_check(void)
684
{
685
    TranslationBlock *tb;
686
    int i, flags1, flags2;
687

    
688
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
689
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
690
            flags1 = page_get_flags(tb->pc);
691
            flags2 = page_get_flags(tb->pc + tb->size - 1);
692
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
693
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
694
                       (long)tb->pc, tb->size, flags1, flags2);
695
            }
696
        }
697
    }
698
}
699

    
700
#endif
701

    
702
/* invalidate one TB */
703
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
704
                             int next_offset)
705
{
706
    TranslationBlock *tb1;
707
    for(;;) {
708
        tb1 = *ptb;
709
        if (tb1 == tb) {
710
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
711
            break;
712
        }
713
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
714
    }
715
}
716

    
717
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
718
{
719
    TranslationBlock *tb1;
720
    unsigned int n1;
721

    
722
    for(;;) {
723
        tb1 = *ptb;
724
        n1 = (long)tb1 & 3;
725
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
726
        if (tb1 == tb) {
727
            *ptb = tb1->page_next[n1];
728
            break;
729
        }
730
        ptb = &tb1->page_next[n1];
731
    }
732
}
733

    
734
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
735
{
736
    TranslationBlock *tb1, **ptb;
737
    unsigned int n1;
738

    
739
    ptb = &tb->jmp_next[n];
740
    tb1 = *ptb;
741
    if (tb1) {
742
        /* find tb(n) in circular list */
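        /* The list is headed by the destination TB's jmp_first and closed by
           an entry tagged with 2; each intermediate entry is a jumping TB
           tagged with which of its two jump slots points here. */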
743
        for(;;) {
744
            tb1 = *ptb;
745
            n1 = (long)tb1 & 3;
746
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
747
            if (n1 == n && tb1 == tb)
748
                break;
749
            if (n1 == 2) {
750
                ptb = &tb1->jmp_first;
751
            } else {
752
                ptb = &tb1->jmp_next[n1];
753
            }
754
        }
755
        /* now we can suppress tb(n) from the list */
756
        *ptb = tb->jmp_next[n];
757

    
758
        tb->jmp_next[n] = NULL;
759
    }
760
}
761

    
762
/* reset the jump entry 'n' of a TB so that it is not chained to
763
   another TB */
764
static inline void tb_reset_jump(TranslationBlock *tb, int n)
765
{
766
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
767
}
768

    
769
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
770
{
771
    CPUState *env;
772
    PageDesc *p;
773
    unsigned int h, n1;
774
    target_phys_addr_t phys_pc;
775
    TranslationBlock *tb1, *tb2;
776

    
777
    /* remove the TB from the hash list */
778
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
779
    h = tb_phys_hash_func(phys_pc);
780
    tb_remove(&tb_phys_hash[h], tb,
781
              offsetof(TranslationBlock, phys_hash_next));
782

    
783
    /* remove the TB from the page list */
784
    if (tb->page_addr[0] != page_addr) {
785
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
786
        tb_page_remove(&p->first_tb, tb);
787
        invalidate_page_bitmap(p);
788
    }
789
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
790
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
791
        tb_page_remove(&p->first_tb, tb);
792
        invalidate_page_bitmap(p);
793
    }
794

    
795
    tb_invalidated_flag = 1;
796

    
797
    /* remove the TB from the hash list */
798
    h = tb_jmp_cache_hash_func(tb->pc);
799
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
800
        if (env->tb_jmp_cache[h] == tb)
801
            env->tb_jmp_cache[h] = NULL;
802
    }
803

    
804
    /* suppress this TB from the two jump lists */
805
    tb_jmp_remove(tb, 0);
806
    tb_jmp_remove(tb, 1);
807

    
808
    /* suppress any remaining jumps to this TB */
809
    tb1 = tb->jmp_first;
810
    for(;;) {
811
        n1 = (long)tb1 & 3;
812
        if (n1 == 2)
813
            break;
814
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
815
        tb2 = tb1->jmp_next[n1];
816
        tb_reset_jump(tb1, n1);
817
        tb1->jmp_next[n1] = NULL;
818
        tb1 = tb2;
819
    }
820
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
821

    
822
    tb_phys_invalidate_count++;
823
}
824

    
825
static inline void set_bits(uint8_t *tab, int start, int len)
826
{
827
    int end, mask, end1;
828

    
829
    end = start + len;
830
    tab += start >> 3;
831
    mask = 0xff << (start & 7);
832
    if ((start & ~7) == (end & ~7)) {
833
        if (start < end) {
834
            mask &= ~(0xff << (end & 7));
835
            *tab |= mask;
836
        }
837
    } else {
838
        *tab++ |= mask;
839
        start = (start + 8) & ~7;
840
        end1 = end & ~7;
841
        while (start < end1) {
842
            *tab++ = 0xff;
843
            start += 8;
844
        }
845
        if (start < end) {
846
            mask = ~(0xff << (end & 7));
847
            *tab |= mask;
848
        }
849
    }
850
}
851

    
852
static void build_page_bitmap(PageDesc *p)
853
{
854
    int n, tb_start, tb_end;
855
    TranslationBlock *tb;
856

    
857
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
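    /* one bit per byte of the page: a set bit means the byte is covered by
       translated code, which lets tb_invalidate_phys_page_fast() skip
       writes that touch no TB */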
858

    
859
    tb = p->first_tb;
860
    while (tb != NULL) {
861
        n = (long)tb & 3;
862
        tb = (TranslationBlock *)((long)tb & ~3);
863
        /* NOTE: this is subtle as a TB may span two physical pages */
864
        if (n == 0) {
865
            /* NOTE: tb_end may be after the end of the page, but
866
               it is not a problem */
867
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
868
            tb_end = tb_start + tb->size;
869
            if (tb_end > TARGET_PAGE_SIZE)
870
                tb_end = TARGET_PAGE_SIZE;
871
        } else {
872
            tb_start = 0;
873
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
874
        }
875
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
876
        tb = tb->page_next[n];
877
    }
878
}
879

    
880
TranslationBlock *tb_gen_code(CPUState *env,
881
                              target_ulong pc, target_ulong cs_base,
882
                              int flags, int cflags)
883
{
884
    TranslationBlock *tb;
885
    uint8_t *tc_ptr;
886
    target_ulong phys_pc, phys_page2, virt_page2;
887
    int code_gen_size;
888

    
889
    phys_pc = get_phys_addr_code(env, pc);
890
    tb = tb_alloc(pc);
891
    if (!tb) {
892
        /* flush must be done */
893
        tb_flush(env);
894
        /* cannot fail at this point */
895
        tb = tb_alloc(pc);
896
        /* Don't forget to invalidate previous TB info.  */
897
        tb_invalidated_flag = 1;
898
    }
899
    tc_ptr = code_gen_ptr;
900
    tb->tc_ptr = tc_ptr;
901
    tb->cs_base = cs_base;
902
    tb->flags = flags;
903
    tb->cflags = cflags;
904
    cpu_gen_code(env, tb, &code_gen_size);
905
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
906

    
907
    /* check next page if needed */
908
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
909
    phys_page2 = -1;
910
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
911
        phys_page2 = get_phys_addr_code(env, virt_page2);
912
    }
913
    tb_link_phys(tb, phys_pc, phys_page2);
914
    return tb;
915
}
916

    
917
/* invalidate all TBs which intersect with the target physical page
918
   starting in the range [start, end). NOTE: start and end must refer to
919
   the same physical page. 'is_cpu_write_access' should be true if called
920
   from a real cpu write access: the virtual CPU will exit the current
921
   TB if code is modified inside this TB. */
922
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
923
                                   int is_cpu_write_access)
924
{
925
    TranslationBlock *tb, *tb_next, *saved_tb;
926
    CPUState *env = cpu_single_env;
927
    target_ulong tb_start, tb_end;
928
    PageDesc *p;
929
    int n;
930
#ifdef TARGET_HAS_PRECISE_SMC
931
    int current_tb_not_found = is_cpu_write_access;
932
    TranslationBlock *current_tb = NULL;
933
    int current_tb_modified = 0;
934
    target_ulong current_pc = 0;
935
    target_ulong current_cs_base = 0;
936
    int current_flags = 0;
937
#endif /* TARGET_HAS_PRECISE_SMC */
938

    
939
    p = page_find(start >> TARGET_PAGE_BITS);
940
    if (!p)
941
        return;
942
    if (!p->code_bitmap &&
943
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
944
        is_cpu_write_access) {
945
        /* build code bitmap */
946
        build_page_bitmap(p);
947
    }
948

    
949
    /* we remove all the TBs in the range [start, end) */
950
    /* XXX: see if in some cases it could be faster to invalidate all the code */
951
    tb = p->first_tb;
952
    while (tb != NULL) {
953
        n = (long)tb & 3;
954
        tb = (TranslationBlock *)((long)tb & ~3);
955
        tb_next = tb->page_next[n];
956
        /* NOTE: this is subtle as a TB may span two physical pages */
957
        if (n == 0) {
958
            /* NOTE: tb_end may be after the end of the page, but
959
               it is not a problem */
960
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
961
            tb_end = tb_start + tb->size;
962
        } else {
963
            tb_start = tb->page_addr[1];
964
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
965
        }
966
        if (!(tb_end <= start || tb_start >= end)) {
967
#ifdef TARGET_HAS_PRECISE_SMC
968
            if (current_tb_not_found) {
969
                current_tb_not_found = 0;
970
                current_tb = NULL;
971
                if (env->mem_io_pc) {
972
                    /* now we have a real cpu fault */
973
                    current_tb = tb_find_pc(env->mem_io_pc);
974
                }
975
            }
976
            if (current_tb == tb &&
977
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
978
                /* If we are modifying the current TB, we must stop
979
                its execution. We could be more precise by checking
980
                that the modification is after the current PC, but it
981
                would require a specialized function to partially
982
                restore the CPU state */
983

    
984
                current_tb_modified = 1;
985
                cpu_restore_state(current_tb, env,
986
                                  env->mem_io_pc, NULL);
987
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
988
                                     &current_flags);
989
            }
990
#endif /* TARGET_HAS_PRECISE_SMC */
991
            /* we need to do that to handle the case where a signal
992
               occurs while doing tb_phys_invalidate() */
993
            saved_tb = NULL;
994
            if (env) {
995
                saved_tb = env->current_tb;
996
                env->current_tb = NULL;
997
            }
998
            tb_phys_invalidate(tb, -1);
999
            if (env) {
1000
                env->current_tb = saved_tb;
1001
                if (env->interrupt_request && env->current_tb)
1002
                    cpu_interrupt(env, env->interrupt_request);
1003
            }
1004
        }
1005
        tb = tb_next;
1006
    }
1007
#if !defined(CONFIG_USER_ONLY)
1008
    /* if no code remaining, no need to continue to use slow writes */
1009
    if (!p->first_tb) {
1010
        invalidate_page_bitmap(p);
1011
        if (is_cpu_write_access) {
1012
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1013
        }
1014
    }
1015
#endif
1016
#ifdef TARGET_HAS_PRECISE_SMC
1017
    if (current_tb_modified) {
1018
        /* we generate a block containing just the instruction
1019
           modifying the memory. It will ensure that it cannot modify
1020
           itself */
1021
        env->current_tb = NULL;
1022
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1023
        cpu_resume_from_signal(env, NULL);
1024
    }
1025
#endif
1026
}
1027

    
1028
/* len must be <= 8 and start must be a multiple of len */
1029
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1030
{
1031
    PageDesc *p;
1032
    int offset, b;
1033
#if 0
1034
    if (1) {
1035
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1036
                  cpu_single_env->mem_io_vaddr, len,
1037
                  cpu_single_env->eip,
1038
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1039
    }
1040
#endif
1041
    p = page_find(start >> TARGET_PAGE_BITS);
1042
    if (!p)
1043
        return;
1044
    if (p->code_bitmap) {
1045
        offset = start & ~TARGET_PAGE_MASK;
1046
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1047
        if (b & ((1 << len) - 1))
1048
            goto do_invalidate;
1049
    } else {
1050
    do_invalidate:
1051
        tb_invalidate_phys_page_range(start, start + len, 1);
1052
    }
1053
}
1054

    
1055
#if !defined(CONFIG_SOFTMMU)
1056
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1057
                                    unsigned long pc, void *puc)
1058
{
1059
    TranslationBlock *tb;
1060
    PageDesc *p;
1061
    int n;
1062
#ifdef TARGET_HAS_PRECISE_SMC
1063
    TranslationBlock *current_tb = NULL;
1064
    CPUState *env = cpu_single_env;
1065
    int current_tb_modified = 0;
1066
    target_ulong current_pc = 0;
1067
    target_ulong current_cs_base = 0;
1068
    int current_flags = 0;
1069
#endif
1070

    
1071
    addr &= TARGET_PAGE_MASK;
1072
    p = page_find(addr >> TARGET_PAGE_BITS);
1073
    if (!p)
1074
        return;
1075
    tb = p->first_tb;
1076
#ifdef TARGET_HAS_PRECISE_SMC
1077
    if (tb && pc != 0) {
1078
        current_tb = tb_find_pc(pc);
1079
    }
1080
#endif
1081
    while (tb != NULL) {
1082
        n = (long)tb & 3;
1083
        tb = (TranslationBlock *)((long)tb & ~3);
1084
#ifdef TARGET_HAS_PRECISE_SMC
1085
        if (current_tb == tb &&
1086
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1087
                /* If we are modifying the current TB, we must stop
1088
                   its execution. We could be more precise by checking
1089
                   that the modification is after the current PC, but it
1090
                   would require a specialized function to partially
1091
                   restore the CPU state */
1092

    
1093
            current_tb_modified = 1;
1094
            cpu_restore_state(current_tb, env, pc, puc);
1095
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1096
                                 &current_flags);
1097
        }
1098
#endif /* TARGET_HAS_PRECISE_SMC */
1099
        tb_phys_invalidate(tb, addr);
1100
        tb = tb->page_next[n];
1101
    }
1102
    p->first_tb = NULL;
1103
#ifdef TARGET_HAS_PRECISE_SMC
1104
    if (current_tb_modified) {
1105
        /* we generate a block containing just the instruction
1106
           modifying the memory. It will ensure that it cannot modify
1107
           itself */
1108
        env->current_tb = NULL;
1109
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1110
        cpu_resume_from_signal(env, puc);
1111
    }
1112
#endif
1113
}
1114
#endif
1115

    
1116
/* add the tb in the target page and protect it if necessary */
1117
static inline void tb_alloc_page(TranslationBlock *tb,
1118
                                 unsigned int n, target_ulong page_addr)
1119
{
1120
    PageDesc *p;
1121
    TranslationBlock *last_first_tb;
1122

    
1123
    tb->page_addr[n] = page_addr;
1124
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1125
    tb->page_next[n] = p->first_tb;
1126
    last_first_tb = p->first_tb;
1127
    p->first_tb = (TranslationBlock *)((long)tb | n);
1128
    invalidate_page_bitmap(p);
1129

    
1130
#if defined(TARGET_HAS_SMC) || 1
1131

    
1132
#if defined(CONFIG_USER_ONLY)
1133
    if (p->flags & PAGE_WRITE) {
1134
        target_ulong addr;
1135
        PageDesc *p2;
1136
        int prot;
1137

    
1138
        /* force the host page as non writable (writes will have a
1139
           page fault + mprotect overhead) */
1140
        page_addr &= qemu_host_page_mask;
1141
        prot = 0;
1142
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1143
            addr += TARGET_PAGE_SIZE) {
1144

    
1145
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1146
            if (!p2)
1147
                continue;
1148
            prot |= p2->flags;
1149
            p2->flags &= ~PAGE_WRITE;
1150
            page_get_flags(addr);
1151
          }
1152
        mprotect(g2h(page_addr), qemu_host_page_size,
1153
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1154
#ifdef DEBUG_TB_INVALIDATE
1155
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1156
               page_addr);
1157
#endif
1158
    }
1159
#else
1160
    /* if some code is already present, then the pages are already
1161
       protected. So we handle the case where only the first TB is
1162
       allocated in a physical page */
1163
    if (!last_first_tb) {
1164
        tlb_protect_code(page_addr);
1165
    }
1166
#endif
1167

    
1168
#endif /* TARGET_HAS_SMC */
1169
}
1170

    
1171
/* Allocate a new translation block. Flush the translation buffer if
1172
   too many translation blocks or too much generated code. */
1173
TranslationBlock *tb_alloc(target_ulong pc)
1174
{
1175
    TranslationBlock *tb;
1176

    
1177
    if (nb_tbs >= code_gen_max_blocks ||
1178
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1179
        return NULL;
1180
    tb = &tbs[nb_tbs++];
1181
    tb->pc = pc;
1182
    tb->cflags = 0;
1183
    return tb;
1184
}
1185

    
1186
void tb_free(TranslationBlock *tb)
1187
{
1188
    /* In practice this is mostly used for single-use temporary TBs.
1189
       Ignore the hard cases and just back up if this TB happens to
1190
       be the last one generated.  */
1191
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1192
        code_gen_ptr = tb->tc_ptr;
1193
        nb_tbs--;
1194
    }
1195
}
1196

    
1197
/* add a new TB and link it to the physical page tables. phys_page2 is
1198
   (-1) to indicate that only one page contains the TB. */
1199
void tb_link_phys(TranslationBlock *tb,
1200
                  target_ulong phys_pc, target_ulong phys_page2)
1201
{
1202
    unsigned int h;
1203
    TranslationBlock **ptb;
1204

    
1205
    /* Grab the mmap lock to stop another thread invalidating this TB
1206
       before we are done.  */
1207
    mmap_lock();
1208
    /* add in the physical hash table */
1209
    h = tb_phys_hash_func(phys_pc);
1210
    ptb = &tb_phys_hash[h];
1211
    tb->phys_hash_next = *ptb;
1212
    *ptb = tb;
1213

    
1214
    /* add in the page list */
1215
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1216
    if (phys_page2 != -1)
1217
        tb_alloc_page(tb, 1, phys_page2);
1218
    else
1219
        tb->page_addr[1] = -1;
1220

    
1221
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1222
    tb->jmp_next[0] = NULL;
1223
    tb->jmp_next[1] = NULL;
1224

    
1225
    /* init original jump addresses */
1226
    if (tb->tb_next_offset[0] != 0xffff)
1227
        tb_reset_jump(tb, 0);
1228
    if (tb->tb_next_offset[1] != 0xffff)
1229
        tb_reset_jump(tb, 1);
1230

    
1231
#ifdef DEBUG_TB_CHECK
1232
    tb_page_check();
1233
#endif
1234
    mmap_unlock();
1235
}
1236

    
1237
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238
   tb[1].tc_ptr. Return NULL if not found */
1239
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1240
{
1241
    int m_min, m_max, m;
1242
    unsigned long v;
1243
    TranslationBlock *tb;
1244

    
1245
    if (nb_tbs <= 0)
1246
        return NULL;
1247
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1248
        tc_ptr >= (unsigned long)code_gen_ptr)
1249
        return NULL;
1250
    /* binary search (cf Knuth) */
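    /* tbs[] is ordered by tc_ptr because blocks are carved sequentially out
       of the code buffer, so on a miss we fall through to return tbs[m_max],
       the last TB whose tc_ptr is <= the requested host PC. */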
1251
    m_min = 0;
1252
    m_max = nb_tbs - 1;
1253
    while (m_min <= m_max) {
1254
        m = (m_min + m_max) >> 1;
1255
        tb = &tbs[m];
1256
        v = (unsigned long)tb->tc_ptr;
1257
        if (v == tc_ptr)
1258
            return tb;
1259
        else if (tc_ptr < v) {
1260
            m_max = m - 1;
1261
        } else {
1262
            m_min = m + 1;
1263
        }
1264
    }
1265
    return &tbs[m_max];
1266
}
1267

    
1268
static void tb_reset_jump_recursive(TranslationBlock *tb);
1269

    
1270
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1271
{
1272
    TranslationBlock *tb1, *tb_next, **ptb;
1273
    unsigned int n1;
1274

    
1275
    tb1 = tb->jmp_next[n];
1276
    if (tb1 != NULL) {
1277
        /* find head of list */
1278
        for(;;) {
1279
            n1 = (long)tb1 & 3;
1280
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281
            if (n1 == 2)
1282
                break;
1283
            tb1 = tb1->jmp_next[n1];
1284
        }
1285
        /* we are now sure that tb jumps to tb1 */
1286
        tb_next = tb1;
1287

    
1288
        /* remove tb from the jmp_first list */
1289
        ptb = &tb_next->jmp_first;
1290
        for(;;) {
1291
            tb1 = *ptb;
1292
            n1 = (long)tb1 & 3;
1293
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1294
            if (n1 == n && tb1 == tb)
1295
                break;
1296
            ptb = &tb1->jmp_next[n1];
1297
        }
1298
        *ptb = tb->jmp_next[n];
1299
        tb->jmp_next[n] = NULL;
1300

    
1301
        /* suppress the jump to next tb in generated code */
1302
        tb_reset_jump(tb, n);
1303

    
1304
        /* suppress jumps in the tb on which we could have jumped */
1305
        tb_reset_jump_recursive(tb_next);
1306
    }
1307
}
1308

    
1309
static void tb_reset_jump_recursive(TranslationBlock *tb)
1310
{
1311
    tb_reset_jump_recursive2(tb, 0);
1312
    tb_reset_jump_recursive2(tb, 1);
1313
}
1314

    
1315
#if defined(TARGET_HAS_ICE)
1316
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1317
{
1318
    target_phys_addr_t addr;
1319
    target_ulong pd;
1320
    ram_addr_t ram_addr;
1321
    PhysPageDesc *p;
1322

    
1323
    addr = cpu_get_phys_page_debug(env, pc);
1324
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1325
    if (!p) {
1326
        pd = IO_MEM_UNASSIGNED;
1327
    } else {
1328
        pd = p->phys_offset;
1329
    }
1330
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1331
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1332
}
1333
#endif
1334

    
1335
/* Add a watchpoint.  */
1336
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1337
                          int flags, CPUWatchpoint **watchpoint)
1338
{
1339
    target_ulong len_mask = ~(len - 1);
1340
    CPUWatchpoint *wp;
1341

    
1342
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
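    /* e.g. len == 4 gives len_mask == ~3: the watchpoint must be 4-byte
       aligned and hit checks can simply compare (addr & len_mask) */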
1343
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1344
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1345
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1346
        return -EINVAL;
1347
    }
1348
    wp = qemu_malloc(sizeof(*wp));
1349

    
1350
    wp->vaddr = addr;
1351
    wp->len_mask = len_mask;
1352
    wp->flags = flags;
1353

    
1354
    /* keep all GDB-injected watchpoints in front */
1355
    if (flags & BP_GDB)
1356
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1357
    else
1358
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1359

    
1360
    tlb_flush_page(env, addr);
1361

    
1362
    if (watchpoint)
1363
        *watchpoint = wp;
1364
    return 0;
1365
}
1366

    
1367
/* Remove a specific watchpoint.  */
1368
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1369
                          int flags)
1370
{
1371
    target_ulong len_mask = ~(len - 1);
1372
    CPUWatchpoint *wp;
1373

    
1374
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1375
        if (addr == wp->vaddr && len_mask == wp->len_mask
1376
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1377
            cpu_watchpoint_remove_by_ref(env, wp);
1378
            return 0;
1379
        }
1380
    }
1381
    return -ENOENT;
1382
}
1383

    
1384
/* Remove a specific watchpoint by reference.  */
1385
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1386
{
1387
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1388

    
1389
    tlb_flush_page(env, watchpoint->vaddr);
1390

    
1391
    qemu_free(watchpoint);
1392
}
1393

    
1394
/* Remove all matching watchpoints.  */
1395
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1396
{
1397
    CPUWatchpoint *wp, *next;
1398

    
1399
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1400
        if (wp->flags & mask)
1401
            cpu_watchpoint_remove_by_ref(env, wp);
1402
    }
1403
}
1404

    
1405
/* Add a breakpoint.  */
1406
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1407
                          CPUBreakpoint **breakpoint)
1408
{
1409
#if defined(TARGET_HAS_ICE)
1410
    CPUBreakpoint *bp;
1411

    
1412
    bp = qemu_malloc(sizeof(*bp));
1413

    
1414
    bp->pc = pc;
1415
    bp->flags = flags;
1416

    
1417
    /* keep all GDB-injected breakpoints in front */
1418
    if (flags & BP_GDB)
1419
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1420
    else
1421
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1422

    
1423
    breakpoint_invalidate(env, pc);
1424

    
1425
    if (breakpoint)
1426
        *breakpoint = bp;
1427
    return 0;
1428
#else
1429
    return -ENOSYS;
1430
#endif
1431
}
1432

    
1433
/* Remove a specific breakpoint.  */
1434
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1435
{
1436
#if defined(TARGET_HAS_ICE)
1437
    CPUBreakpoint *bp;
1438

    
1439
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1440
        if (bp->pc == pc && bp->flags == flags) {
1441
            cpu_breakpoint_remove_by_ref(env, bp);
1442
            return 0;
1443
        }
1444
    }
1445
    return -ENOENT;
1446
#else
1447
    return -ENOSYS;
1448
#endif
1449
}
1450

    
1451
/* Remove a specific breakpoint by reference.  */
1452
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1453
{
1454
#if defined(TARGET_HAS_ICE)
1455
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1456

    
1457
    breakpoint_invalidate(env, breakpoint->pc);
1458

    
1459
    qemu_free(breakpoint);
1460
#endif
1461
}
1462

    
1463
/* Remove all matching breakpoints. */
1464
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1465
{
1466
#if defined(TARGET_HAS_ICE)
1467
    CPUBreakpoint *bp, *next;
1468

    
1469
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1470
        if (bp->flags & mask)
1471
            cpu_breakpoint_remove_by_ref(env, bp);
1472
    }
1473
#endif
1474
}
1475

    
1476
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1477
   CPU loop after each instruction */
1478
void cpu_single_step(CPUState *env, int enabled)
1479
{
1480
#if defined(TARGET_HAS_ICE)
1481
    if (env->singlestep_enabled != enabled) {
1482
        env->singlestep_enabled = enabled;
1483
        if (kvm_enabled())
1484
            kvm_update_guest_debug(env, 0);
1485
        else {
1486
            /* must flush all the translated code to avoid inconsistencies */
1487
            /* XXX: only flush what is necessary */
1488
            tb_flush(env);
1489
        }
1490
    }
1491
#endif
1492
}
1493

    
1494
/* enable or disable low-level logging */
1495
void cpu_set_log(int log_flags)
1496
{
1497
    loglevel = log_flags;
1498
    if (loglevel && !logfile) {
1499
        logfile = fopen(logfilename, log_append ? "a" : "w");
1500
        if (!logfile) {
1501
            perror(logfilename);
1502
            _exit(1);
1503
        }
1504
#if !defined(CONFIG_SOFTMMU)
1505
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1506
        {
1507
            static char logfile_buf[4096];
1508
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1509
        }
1510
#elif !defined(_WIN32)
1511
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1512
        setvbuf(logfile, NULL, _IOLBF, 0);
1513
#endif
1514
        log_append = 1;
1515
    }
1516
    if (!loglevel && logfile) {
1517
        fclose(logfile);
1518
        logfile = NULL;
1519
    }
1520
}
1521

    
1522
void cpu_set_log_filename(const char *filename)
1523
{
1524
    logfilename = strdup(filename);
1525
    if (logfile) {
1526
        fclose(logfile);
1527
        logfile = NULL;
1528
    }
1529
    cpu_set_log(loglevel);
1530
}
1531

    
1532
static void cpu_unlink_tb(CPUState *env)
1533
{
1534
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1535
       problem and hope the cpu will stop of its own accord.  For userspace
1536
       emulation this often isn't actually as bad as it sounds.  Often
1537
       signals are used primarily to interrupt blocking syscalls.  */
1538
    TranslationBlock *tb;
1539
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1540

    
1541
    spin_lock(&interrupt_lock);
1542
    tb = env->current_tb;
1543
    /* if the cpu is currently executing code, we must unlink it and
1544
       all the potentially executing TB */
1545
    if (tb) {
1546
        env->current_tb = NULL;
1547
        tb_reset_jump_recursive(tb);
1548
    }
1549
    spin_unlock(&interrupt_lock);
1550
}
1551

    
1552
/* mask must never be zero, except for A20 change call */
1553
void cpu_interrupt(CPUState *env, int mask)
1554
{
1555
    int old_mask;
1556

    
1557
    old_mask = env->interrupt_request;
1558
    env->interrupt_request |= mask;
1559

    
1560
#ifndef CONFIG_USER_ONLY
1561
    /*
1562
     * If called from iothread context, wake the target cpu in
1563
     * case it is halted.
1564
     */
1565
    if (!qemu_cpu_self(env)) {
1566
        qemu_cpu_kick(env);
1567
        return;
1568
    }
1569
#endif
1570

    
1571
    if (use_icount) {
1572
        env->icount_decr.u16.high = 0xffff;
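        /* writing the high half makes the combined 32-bit icount_decr value
           negative, so the counter check in generated code exits the TB at
           the next opportunity */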
1573
#ifndef CONFIG_USER_ONLY
1574
        if (!can_do_io(env)
1575
            && (mask & ~old_mask) != 0) {
1576
            cpu_abort(env, "Raised interrupt while not in I/O function");
1577
        }
1578
#endif
1579
    } else {
1580
        cpu_unlink_tb(env);
1581
    }
1582
}
1583

    
1584
void cpu_reset_interrupt(CPUState *env, int mask)
1585
{
1586
    env->interrupt_request &= ~mask;
1587
}
1588

    
1589
void cpu_exit(CPUState *env)
1590
{
1591
    env->exit_request = 1;
1592
    cpu_unlink_tb(env);
1593
}
1594

    
1595
const CPULogItem cpu_log_items[] = {
1596
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1597
      "show generated host assembly code for each compiled TB" },
1598
    { CPU_LOG_TB_IN_ASM, "in_asm",
1599
      "show target assembly code for each compiled TB" },
1600
    { CPU_LOG_TB_OP, "op",
1601
      "show micro ops for each compiled TB" },
1602
    { CPU_LOG_TB_OP_OPT, "op_opt",
1603
      "show micro ops "
1604
#ifdef TARGET_I386
1605
      "before eflags optimization and "
1606
#endif
1607
      "after liveness analysis" },
1608
    { CPU_LOG_INT, "int",
1609
      "show interrupts/exceptions in short format" },
1610
    { CPU_LOG_EXEC, "exec",
1611
      "show trace before each executed TB (lots of logs)" },
1612
    { CPU_LOG_TB_CPU, "cpu",
1613
      "show CPU state before block translation" },
1614
#ifdef TARGET_I386
1615
    { CPU_LOG_PCALL, "pcall",
1616
      "show protected mode far calls/returns/exceptions" },
1617
    { CPU_LOG_RESET, "cpu_reset",
1618
      "show CPU state before CPU resets" },
1619
#endif
1620
#ifdef DEBUG_IOPORT
1621
    { CPU_LOG_IOPORT, "ioport",
1622
      "show all i/o ports accesses" },
1623
#endif
1624
    { 0, NULL, NULL },
1625
};
1626

    
1627
#ifndef CONFIG_USER_ONLY
1628
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
1629
    = QLIST_HEAD_INITIALIZER(memory_client_list);
1630

    
1631
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
1632
                                  ram_addr_t size,
1633
                                  ram_addr_t phys_offset)
1634
{
1635
    CPUPhysMemoryClient *client;
1636
    QLIST_FOREACH(client, &memory_client_list, list) {
1637
        client->set_memory(client, start_addr, size, phys_offset);
1638
    }
1639
}
1640

    
1641
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
1642
                                        target_phys_addr_t end)
1643
{
1644
    CPUPhysMemoryClient *client;
1645
    QLIST_FOREACH(client, &memory_client_list, list) {
1646
        int r = client->sync_dirty_bitmap(client, start, end);
1647
        if (r < 0)
1648
            return r;
1649
    }
1650
    return 0;
1651
}
1652

    
1653
static int cpu_notify_migration_log(int enable)
1654
{
1655
    CPUPhysMemoryClient *client;
1656
    QLIST_FOREACH(client, &memory_client_list, list) {
1657
        int r = client->migration_log(client, enable);
1658
        if (r < 0)
1659
            return r;
1660
    }
1661
    return 0;
1662
}
1663

    
1664
static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
1665
                                         CPUPhysMemoryClient *client)
1666
{
1667
    PhysPageDesc *pd;
1668
    int l1, l2;
1669

    
1670
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
1671
        pd = phys_map[l1];
1672
        if (!pd) {
1673
            continue;
1674
        }
1675
        for (l2 = 0; l2 < L2_SIZE; ++l2) {
1676
            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
1677
                continue;
1678
            }
1679
            client->set_memory(client, pd[l2].region_offset,
1680
                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
1681
        }
1682
    }
1683
}
1684

    
1685
static void phys_page_for_each(CPUPhysMemoryClient *client)
1686
{
1687
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
1688

    
1689
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
1690
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
1691
#endif
1692
    void **phys_map = (void **)l1_phys_map;
1693
    int l1;
1694
    if (!l1_phys_map) {
1695
        return;
1696
    }
1697
    for (l1 = 0; l1 < L1_SIZE; ++l1) {
1698
        if (phys_map[l1]) {
1699
            phys_page_for_each_in_l1_map(phys_map[l1], client);
1700
        }
1701
    }
1702
#else
1703
    if (!l1_phys_map) {
1704
        return;
1705
    }
1706
    phys_page_for_each_in_l1_map(l1_phys_map, client);
1707
#endif
1708
}
1709

    
1710
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
1711
{
1712
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
1713
    phys_page_for_each(client);
1714
}
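
/*
 * Illustrative sketch, not part of the original file: a minimal
 * CPUPhysMemoryClient.  Only the hook signatures are taken from the calls
 * above; every "my_" name is invented for the example.
 *
 *     static void my_set_memory(CPUPhysMemoryClient *client,
 *                               target_phys_addr_t start_addr,
 *                               ram_addr_t size, ram_addr_t phys_offset)
 *     {
 *     }
 *
 *     static int my_sync_dirty_bitmap(CPUPhysMemoryClient *client,
 *                                     target_phys_addr_t start,
 *                                     target_phys_addr_t end)
 *     {
 *         return 0;
 *     }
 *
 *     static int my_migration_log(CPUPhysMemoryClient *client, int enable)
 *     {
 *         return 0;
 *     }
 *
 *     static CPUPhysMemoryClient my_client = {
 *         .set_memory = my_set_memory,
 *         .sync_dirty_bitmap = my_sync_dirty_bitmap,
 *         .migration_log = my_migration_log,
 *     };
 *
 * Registering the client replays every currently mapped page through
 * my_set_memory():
 *
 *     cpu_register_phys_memory_client(&my_client);
 */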
1715

    
1716
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
1717
{
1718
    QLIST_REMOVE(client, list);
1719
}
1720
#endif
1721

    
1722
static int cmp1(const char *s1, int n, const char *s2)
1723
{
1724
    if (strlen(s2) != n)
1725
        return 0;
1726
    return memcmp(s1, s2, n) == 0;
1727
}
1728

    
1729
/* Takes a comma-separated list of log masks. Returns 0 on error. */
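/* Example (illustrative, not in the original file): with the table above,
   cpu_str_to_log_mask("in_asm,exec") yields CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC,
   while an unknown name such as "bogus" makes it return 0. */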
1730
int cpu_str_to_log_mask(const char *str)
1731
{
1732
    const CPULogItem *item;
1733
    int mask;
1734
    const char *p, *p1;
1735

    
1736
    p = str;
1737
    mask = 0;
1738
    for(;;) {
1739
        p1 = strchr(p, ',');
1740
        if (!p1)
1741
            p1 = p + strlen(p);
1742
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
1753
    found:
1754
        mask |= item->mask;
1755
        if (*p1 != ',')
1756
            break;
1757
        p = p1 + 1;
1758
    }
1759
    return mask;
1760
}
1761

    
1762
void cpu_abort(CPUState *env, const char *fmt, ...)
1763
{
1764
    va_list ap;
1765
    va_list ap2;
1766

    
1767
    va_start(ap, fmt);
1768
    va_copy(ap2, ap);
1769
    fprintf(stderr, "qemu: fatal: ");
1770
    vfprintf(stderr, fmt, ap);
1771
    fprintf(stderr, "\n");
1772
#ifdef TARGET_I386
1773
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1774
#else
1775
    cpu_dump_state(env, stderr, fprintf, 0);
1776
#endif
1777
    if (qemu_log_enabled()) {
1778
        qemu_log("qemu: fatal: ");
1779
        qemu_log_vprintf(fmt, ap2);
1780
        qemu_log("\n");
1781
#ifdef TARGET_I386
1782
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1783
#else
1784
        log_cpu_state(env, 0);
1785
#endif
1786
        qemu_log_flush();
1787
        qemu_log_close();
1788
    }
1789
    va_end(ap2);
1790
    va_end(ap);
1791
#if defined(CONFIG_USER_ONLY)
1792
    {
1793
        struct sigaction act;
1794
        sigfillset(&act.sa_mask);
1795
        act.sa_handler = SIG_DFL;
1796
        sigaction(SIGABRT, &act, NULL);
1797
    }
1798
#endif
1799
    abort();
1800
}
1801

    
1802
CPUState *cpu_copy(CPUState *env)
1803
{
1804
    CPUState *new_env = cpu_init(env->cpu_model_str);
1805
    CPUState *next_cpu = new_env->next_cpu;
1806
    int cpu_index = new_env->cpu_index;
1807
#if defined(TARGET_HAS_ICE)
1808
    CPUBreakpoint *bp;
1809
    CPUWatchpoint *wp;
1810
#endif
1811

    
1812
    memcpy(new_env, env, sizeof(CPUState));
1813

    
1814
    /* Preserve chaining and index. */
1815
    new_env->next_cpu = next_cpu;
1816
    new_env->cpu_index = cpu_index;
1817

    
1818
    /* Clone all break/watchpoints.
1819
       Note: Once we support ptrace with hw-debug register access, make sure
1820
       BP_CPU break/watchpoints are handled correctly on clone. */
1821
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
1823
#if defined(TARGET_HAS_ICE)
1824
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1825
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1826
    }
1827
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1828
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1829
                              wp->flags, NULL);
1830
    }
1831
#endif
1832

    
1833
    return new_env;
1834
}
1835

    
1836
#if !defined(CONFIG_USER_ONLY)
1837

    
1838
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1839
{
1840
    unsigned int i;
1841

    
1842
    /* Discard jump cache entries for any tb which might potentially
1843
       overlap the flushed page.  */
1844
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1845
    memset (&env->tb_jmp_cache[i], 0, 
1846
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1847

    
1848
    i = tb_jmp_cache_hash_page(addr);
1849
    memset (&env->tb_jmp_cache[i], 0, 
1850
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1851
}
1852

    
1853
static CPUTLBEntry s_cputlb_empty_entry = {
1854
    .addr_read  = -1,
1855
    .addr_write = -1,
1856
    .addr_code  = -1,
1857
    .addend     = -1,
1858
};
1859

    
1860
/* NOTE: if flush_global is true, also flush global entries (not
1861
   implemented yet) */
1862
void tlb_flush(CPUState *env, int flush_global)
1863
{
1864
    int i;
1865

    
1866
#if defined(DEBUG_TLB)
1867
    printf("tlb_flush:\n");
1868
#endif
1869
    /* must reset current TB so that interrupts cannot modify the
1870
       links while we are modifying them */
1871
    env->current_tb = NULL;
1872

    
1873
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1874
        int mmu_idx;
1875
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1876
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1877
        }
1878
    }
1879

    
1880
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1881

    
1882
    tlb_flush_count++;
1883
}
1884

    
1885
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1886
{
1887
    if (addr == (tlb_entry->addr_read &
1888
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1889
        addr == (tlb_entry->addr_write &
1890
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1891
        addr == (tlb_entry->addr_code &
1892
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1893
        *tlb_entry = s_cputlb_empty_entry;
1894
    }
1895
}
1896

    
1897
void tlb_flush_page(CPUState *env, target_ulong addr)
1898
{
1899
    int i;
1900
    int mmu_idx;
1901

    
1902
#if defined(DEBUG_TLB)
1903
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1904
#endif
1905
    /* must reset current TB so that interrupts cannot modify the
1906
       links while we are modifying them */
1907
    env->current_tb = NULL;
1908

    
1909
    addr &= TARGET_PAGE_MASK;
1910
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1911
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1912
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1913

    
1914
    tlb_flush_jmp_cache(env, addr);
1915
}
1916

    
1917
/* update the TLBs so that writes to code in the virtual page 'addr'
1918
   can be detected */
1919
static void tlb_protect_code(ram_addr_t ram_addr)
1920
{
1921
    cpu_physical_memory_reset_dirty(ram_addr,
1922
                                    ram_addr + TARGET_PAGE_SIZE,
1923
                                    CODE_DIRTY_FLAG);
1924
}
1925

    
1926
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1927
   tested for self modifying code */
1928
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1929
                                    target_ulong vaddr)
1930
{
1931
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1932
}
1933

    
1934
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1935
                                         unsigned long start, unsigned long length)
1936
{
1937
    unsigned long addr;
1938
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1939
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1940
        if ((addr - start) < length) {
1941
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1942
        }
1943
    }
1944
}
1945

    
1946
/* Note: start and end must be within the same ram block.  */
1947
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1948
                                     int dirty_flags)
1949
{
1950
    CPUState *env;
1951
    unsigned long length, start1;
1952
    int i, mask, len;
1953
    uint8_t *p;
1954

    
1955
    start &= TARGET_PAGE_MASK;
1956
    end = TARGET_PAGE_ALIGN(end);
1957

    
1958
    length = end - start;
1959
    if (length == 0)
1960
        return;
1961
    len = length >> TARGET_PAGE_BITS;
1962
    mask = ~dirty_flags;
1963
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1964
    for(i = 0; i < len; i++)
1965
        p[i] &= mask;
1966

    
1967
    /* we modify the TLB cache so that the dirty bit will be set again
1968
       when accessing the range */
1969
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1970
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1972
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1973
            != (end - 1) - start) {
1974
        abort();
1975
    }
1976

    
1977
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1978
        int mmu_idx;
1979
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1980
            for(i = 0; i < CPU_TLB_SIZE; i++)
1981
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1982
                                      start1, length);
1983
        }
1984
    }
1985
}
1986

    
1987
int cpu_physical_memory_set_dirty_tracking(int enable)
1988
{
1989
    int ret = 0;
1990
    in_migration = enable;
1991
    ret = cpu_notify_migration_log(!!enable);
1992
    return ret;
1993
}
1994

    
1995
int cpu_physical_memory_get_dirty_tracking(void)
1996
{
1997
    return in_migration;
1998
}
1999

    
2000
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2001
                                   target_phys_addr_t end_addr)
2002
{
2003
    int ret;
2004

    
2005
    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
2006
    return ret;
2007
}
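
/*
 * Illustrative sequence, not part of the original file: migration-style
 * dirty logging combines the helpers above roughly as follows.  ram_size,
 * start, end and dirty_flag are placeholders; CODE_DIRTY_FLAG is the flag
 * used elsewhere in this file.
 *
 *     cpu_physical_memory_set_dirty_tracking(1);
 *     cpu_physical_sync_dirty_bitmap(0, ram_size);
 *     cpu_physical_memory_reset_dirty(start, end, dirty_flag);
 *     cpu_physical_memory_set_dirty_tracking(0);
 */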
2008

    
2009
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
2010
{
2011
    ram_addr_t ram_addr;
2012
    void *p;
2013

    
2014
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
2015
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
2016
            + tlb_entry->addend);
2017
        ram_addr = qemu_ram_addr_from_host(p);
2018
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
2019
            tlb_entry->addr_write |= TLB_NOTDIRTY;
2020
        }
2021
    }
2022
}
2023

    
2024
/* update the TLB according to the current state of the dirty bits */
2025
void cpu_tlb_update_dirty(CPUState *env)
2026
{
2027
    int i;
2028
    int mmu_idx;
2029
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2030
        for(i = 0; i < CPU_TLB_SIZE; i++)
2031
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2032
    }
2033
}
2034

    
2035
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
2036
{
2037
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2038
        tlb_entry->addr_write = vaddr;
2039
}
2040

    
2041
/* update the TLB corresponding to virtual page vaddr
2042
   so that it is no longer dirty */
2043
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
2044
{
2045
    int i;
2046
    int mmu_idx;
2047

    
2048
    vaddr &= TARGET_PAGE_MASK;
2049
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2050
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2051
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
2052
}
2053

    
2054
/* add a new TLB entry. At most one entry for a given virtual address
2055
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2056
   (can only happen in non SOFTMMU mode for I/O pages or pages
2057
   conflicting with the host address space). */
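
/*
 * Illustrative call, not part of the original file: after a target MMU
 * walk has produced a physical page and protection bits, the entry is
 * typically installed with page-aligned addresses, along these lines
 * (the prot and mmu_idx values are placeholders).
 *
 *     tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 *                       paddr & TARGET_PAGE_MASK,
 *                       PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, 1);
 */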
2058
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2059
                      target_phys_addr_t paddr, int prot,
2060
                      int mmu_idx, int is_softmmu)
2061
{
2062
    PhysPageDesc *p;
2063
    unsigned long pd;
2064
    unsigned int index;
2065
    target_ulong address;
2066
    target_ulong code_address;
2067
    target_phys_addr_t addend;
2068
    int ret;
2069
    CPUTLBEntry *te;
2070
    CPUWatchpoint *wp;
2071
    target_phys_addr_t iotlb;
2072

    
2073
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2074
    if (!p) {
2075
        pd = IO_MEM_UNASSIGNED;
2076
    } else {
2077
        pd = p->phys_offset;
2078
    }
2079
#if defined(DEBUG_TLB)
2080
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2081
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2082
#endif
2083

    
2084
    ret = 0;
2085
    address = vaddr;
2086
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2087
        /* IO memory case (romd handled later) */
2088
        address |= TLB_MMIO;
2089
    }
2090
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2091
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2092
        /* Normal RAM.  */
2093
        iotlb = pd & TARGET_PAGE_MASK;
2094
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2095
            iotlb |= IO_MEM_NOTDIRTY;
2096
        else
2097
            iotlb |= IO_MEM_ROM;
2098
    } else {
2099
        /* IO handlers are currently passed a physical address.
2100
           It would be nice to pass an offset from the base address
2101
           of that region.  This would avoid having to special case RAM,
2102
           and avoid full address decoding in every device.
2103
           We can't use the high bits of pd for this because
2104
           IO_MEM_ROMD uses these as a ram address.  */
2105
        iotlb = (pd & ~TARGET_PAGE_MASK);
2106
        if (p) {
2107
            iotlb += p->region_offset;
2108
        } else {
2109
            iotlb += paddr;
2110
        }
2111
    }
2112

    
2113
    code_address = address;
2114
    /* Make accesses to pages with watchpoints go via the
2115
       watchpoint trap routines.  */
2116
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2117
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2118
            iotlb = io_mem_watch + paddr;
2119
            /* TODO: The memory case can be optimized by not trapping
2120
               reads of pages with a write breakpoint.  */
2121
            address |= TLB_MMIO;
2122
        }
2123
    }
2124

    
2125
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2126
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2127
    te = &env->tlb_table[mmu_idx][index];
2128
    te->addend = addend - vaddr;
2129
    if (prot & PAGE_READ) {
2130
        te->addr_read = address;
2131
    } else {
2132
        te->addr_read = -1;
2133
    }
2134

    
2135
    if (prot & PAGE_EXEC) {
2136
        te->addr_code = code_address;
2137
    } else {
2138
        te->addr_code = -1;
2139
    }
2140
    if (prot & PAGE_WRITE) {
2141
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2142
            (pd & IO_MEM_ROMD)) {
2143
            /* Write access calls the I/O callback.  */
2144
            te->addr_write = address | TLB_MMIO;
2145
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2146
                   !cpu_physical_memory_is_dirty(pd)) {
2147
            te->addr_write = address | TLB_NOTDIRTY;
2148
        } else {
2149
            te->addr_write = address;
2150
        }
2151
    } else {
2152
        te->addr_write = -1;
2153
    }
2154
    return ret;
2155
}
2156

    
2157
#else
2158

    
2159
void tlb_flush(CPUState *env, int flush_global)
2160
{
2161
}
2162

    
2163
void tlb_flush_page(CPUState *env, target_ulong addr)
2164
{
2165
}
2166

    
2167
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2168
                      target_phys_addr_t paddr, int prot,
2169
                      int mmu_idx, int is_softmmu)
2170
{
2171
    return 0;
2172
}
2173

    
2174
/*
2175
 * Walks guest process memory "regions" one by one
2176
 * and calls callback function 'fn' for each region.
2177
 */
2178
int walk_memory_regions(void *priv,
2179
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2180
{
2181
    unsigned long start, end;
2182
    PageDesc *p = NULL;
2183
    int i, j, prot, prot1;
2184
    int rc = 0;
2185

    
2186
    start = end = -1;
2187
    prot = 0;
2188

    
2189
    for (i = 0; i <= L1_SIZE; i++) {
2190
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2191
        for (j = 0; j < L2_SIZE; j++) {
2192
            prot1 = (p == NULL) ? 0 : p[j].flags;
2193
            /*
2194
             * "region" is one continuous chunk of memory
2195
             * that has same protection flags set.
2196
             */
2197
            if (prot1 != prot) {
2198
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2199
                if (start != -1) {
2200
                    rc = (*fn)(priv, start, end, prot);
2201
                    /* callback can stop iteration by returning != 0 */
2202
                    if (rc != 0)
2203
                        return (rc);
2204
                }
2205
                if (prot1 != 0)
2206
                    start = end;
2207
                else
2208
                    start = -1;
2209
                prot = prot1;
2210
            }
2211
            if (p == NULL)
2212
                break;
2213
        }
2214
    }
2215
    return (rc);
2216
}
2217

    
2218
static int dump_region(void *priv, unsigned long start,
2219
    unsigned long end, unsigned long prot)
2220
{
2221
    FILE *f = (FILE *)priv;
2222

    
2223
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2224
        start, end, end - start,
2225
        ((prot & PAGE_READ) ? 'r' : '-'),
2226
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2227
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2228

    
2229
    return (0);
2230
}
2231

    
2232
/* dump memory mappings */
2233
void page_dump(FILE *f)
2234
{
2235
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2236
            "start", "end", "size", "prot");
2237
    walk_memory_regions(f, dump_region);
2238
}
2239

    
2240
int page_get_flags(target_ulong address)
2241
{
2242
    PageDesc *p;
2243

    
2244
    p = page_find(address >> TARGET_PAGE_BITS);
2245
    if (!p)
2246
        return 0;
2247
    return p->flags;
2248
}
2249

    
2250
/* modify the flags of a page and invalidate the code if
2251
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2252
   depending on PAGE_WRITE */
2253
void page_set_flags(target_ulong start, target_ulong end, int flags)
2254
{
2255
    PageDesc *p;
2256
    target_ulong addr;
2257

    
2258
    /* mmap_lock should already be held.  */
2259
    start = start & TARGET_PAGE_MASK;
2260
    end = TARGET_PAGE_ALIGN(end);
2261
    if (flags & PAGE_WRITE)
2262
        flags |= PAGE_WRITE_ORG;
2263
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2264
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2265
        /* We may be called for host regions that are outside guest
2266
           address space.  */
2267
        if (!p)
2268
            return;
2269
        /* if the write protection is set, then we invalidate the code
2270
           inside */
2271
        if (!(p->flags & PAGE_WRITE) &&
2272
            (flags & PAGE_WRITE) &&
2273
            p->first_tb) {
2274
            tb_invalidate_phys_page(addr, 0, NULL);
2275
        }
2276
        p->flags = flags;
2277
    }
2278
}
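
/*
 * Illustrative call, not part of the original file: user-mode code that
 * has just mapped a guest range records it roughly like this (start and
 * len are placeholders).
 *
 *     page_set_flags(start, start + len,
 *                    PAGE_VALID | PAGE_READ | PAGE_WRITE);
 */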
2279

    
2280
int page_check_range(target_ulong start, target_ulong len, int flags)
2281
{
2282
    PageDesc *p;
2283
    target_ulong end;
2284
    target_ulong addr;
2285

    
2286
    if (start + len < start)
2287
        /* we've wrapped around */
2288
        return -1;
2289

    
2290
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2291
    start = start & TARGET_PAGE_MASK;
2292

    
2293
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2294
        p = page_find(addr >> TARGET_PAGE_BITS);
2295
        if( !p )
2296
            return -1;
2297
        if( !(p->flags & PAGE_VALID) )
2298
            return -1;
2299

    
2300
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2301
            return -1;
2302
        if (flags & PAGE_WRITE) {
2303
            if (!(p->flags & PAGE_WRITE_ORG))
2304
                return -1;
2305
            /* unprotect the page if it was put read-only because it
2306
               contains translated code */
2307
            if (!(p->flags & PAGE_WRITE)) {
2308
                if (!page_unprotect(addr, 0, NULL))
2309
                    return -1;
2310
            }
2311
            return 0;
2312
        }
2313
    }
2314
    return 0;
2315
}
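
/*
 * Illustrative check, not part of the original file: before copying data
 * into guest memory, a caller can verify access rights first (guest_addr
 * and len are placeholders).
 *
 *     if (page_check_range(guest_addr, len, PAGE_WRITE) < 0) {
 *         return -1;
 *     }
 */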
2316

    
2317
/* called from signal handler: invalidate the code and unprotect the
2318
   page. Return TRUE if the fault was successfully handled. */
2319
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2320
{
2321
    unsigned int page_index, prot, pindex;
2322
    PageDesc *p, *p1;
2323
    target_ulong host_start, host_end, addr;
2324

    
2325
    /* Technically this isn't safe inside a signal handler.  However we
2326
       know this only ever happens in a synchronous SEGV handler, so in
2327
       practice it seems to be ok.  */
2328
    mmap_lock();
2329

    
2330
    host_start = address & qemu_host_page_mask;
2331
    page_index = host_start >> TARGET_PAGE_BITS;
2332
    p1 = page_find(page_index);
2333
    if (!p1) {
2334
        mmap_unlock();
2335
        return 0;
2336
    }
2337
    host_end = host_start + qemu_host_page_size;
2338
    p = p1;
2339
    prot = 0;
2340
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2341
        prot |= p->flags;
2342
        p++;
2343
    }
2344
    /* if the page was really writable, then we change its
2345
       protection back to writable */
2346
    if (prot & PAGE_WRITE_ORG) {
2347
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2348
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2349
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2350
                     (prot & PAGE_BITS) | PAGE_WRITE);
2351
            p1[pindex].flags |= PAGE_WRITE;
2352
            /* and since the content will be modified, we must invalidate
2353
               the corresponding translated code. */
2354
            tb_invalidate_phys_page(address, pc, puc);
2355
#ifdef DEBUG_TB_CHECK
2356
            tb_invalidate_check(address);
2357
#endif
2358
            mmap_unlock();
2359
            return 1;
2360
        }
2361
    }
2362
    mmap_unlock();
2363
    return 0;
2364
}
2365

    
2366
static inline void tlb_set_dirty(CPUState *env,
2367
                                 unsigned long addr, target_ulong vaddr)
2368
{
2369
}
2370
#endif /* defined(CONFIG_USER_ONLY) */
2371

    
2372
#if !defined(CONFIG_USER_ONLY)
2373

    
2374
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2375
                             ram_addr_t memory, ram_addr_t region_offset);
2376
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2377
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2378
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2379
                      need_subpage)                                     \
2380
    do {                                                                \
2381
        if (addr > start_addr)                                          \
2382
            start_addr2 = 0;                                            \
2383
        else {                                                          \
2384
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2385
            if (start_addr2 > 0)                                        \
2386
                need_subpage = 1;                                       \
2387
        }                                                               \
2388
                                                                        \
2389
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2390
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2391
        else {                                                          \
2392
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2393
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2394
                need_subpage = 1;                                       \
2395
        }                                                               \
2396
    } while (0)
2397

    
2398
/* register physical memory.
2399
   For RAM, 'size' must be a multiple of the target page size.
2400
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2401
   io memory page.  The address used when calling the IO function is
2402
   the offset from the start of the region, plus region_offset.  Both
2403
   start_addr and region_offset are rounded down to a page boundary
2404
   before calculating this offset.  This should not be a problem unless
2405
   the low bits of start_addr and region_offset differ.  */
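
/*
 * Illustrative sketch, not part of the original file: a board model
 * typically allocates RAM and then maps it at a guest physical address,
 * roughly as follows (base address and size are arbitrary).
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(0x800000);
 *     cpu_register_physical_memory_offset(0x00000000, 0x800000,
 *                                         ram_offset | IO_MEM_RAM, 0);
 */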
2406
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2407
                                         ram_addr_t size,
2408
                                         ram_addr_t phys_offset,
2409
                                         ram_addr_t region_offset)
2410
{
2411
    target_phys_addr_t addr, end_addr;
2412
    PhysPageDesc *p;
2413
    CPUState *env;
2414
    ram_addr_t orig_size = size;
2415
    void *subpage;
2416

    
2417
    cpu_notify_set_memory(start_addr, size, phys_offset);
2418

    
2419
    if (phys_offset == IO_MEM_UNASSIGNED) {
2420
        region_offset = start_addr;
2421
    }
2422
    region_offset &= TARGET_PAGE_MASK;
2423
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2424
    end_addr = start_addr + (target_phys_addr_t)size;
2425
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2426
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2427
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2428
            ram_addr_t orig_memory = p->phys_offset;
2429
            target_phys_addr_t start_addr2, end_addr2;
2430
            int need_subpage = 0;
2431

    
2432
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2433
                          need_subpage);
2434
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2435
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2436
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2437
                                           &p->phys_offset, orig_memory,
2438
                                           p->region_offset);
2439
                } else {
2440
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2441
                                            >> IO_MEM_SHIFT];
2442
                }
2443
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2444
                                 region_offset);
2445
                p->region_offset = 0;
2446
            } else {
2447
                p->phys_offset = phys_offset;
2448
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2449
                    (phys_offset & IO_MEM_ROMD))
2450
                    phys_offset += TARGET_PAGE_SIZE;
2451
            }
2452
        } else {
2453
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2454
            p->phys_offset = phys_offset;
2455
            p->region_offset = region_offset;
2456
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2457
                (phys_offset & IO_MEM_ROMD)) {
2458
                phys_offset += TARGET_PAGE_SIZE;
2459
            } else {
2460
                target_phys_addr_t start_addr2, end_addr2;
2461
                int need_subpage = 0;
2462

    
2463
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2464
                              end_addr2, need_subpage);
2465

    
2466
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2467
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2468
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2469
                                           addr & TARGET_PAGE_MASK);
2470
                    subpage_register(subpage, start_addr2, end_addr2,
2471
                                     phys_offset, region_offset);
2472
                    p->region_offset = 0;
2473
                }
2474
            }
2475
        }
2476
        region_offset += TARGET_PAGE_SIZE;
2477
    }
2478

    
2479
    /* since each CPU stores ram addresses in its TLB cache, we must
2480
       reset the modified entries */
2481
    /* XXX: slow ! */
2482
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2483
        tlb_flush(env, 1);
2484
    }
2485
}
2486

    
2487
/* XXX: temporary until new memory mapping API */
2488
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2489
{
2490
    PhysPageDesc *p;
2491

    
2492
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2493
    if (!p)
2494
        return IO_MEM_UNASSIGNED;
2495
    return p->phys_offset;
2496
}
2497

    
2498
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2499
{
2500
    if (kvm_enabled())
2501
        kvm_coalesce_mmio_region(addr, size);
2502
}
2503

    
2504
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2505
{
2506
    if (kvm_enabled())
2507
        kvm_uncoalesce_mmio_region(addr, size);
2508
}
2509

    
2510
void qemu_flush_coalesced_mmio_buffer(void)
2511
{
2512
    if (kvm_enabled())
2513
        kvm_flush_coalesced_mmio_buffer();
2514
}
2515

    
2516
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2517
{
2518
    RAMBlock *new_block;
2519

    
2520
    size = TARGET_PAGE_ALIGN(size);
2521
    new_block = qemu_malloc(sizeof(*new_block));
2522

    
2523
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2524
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2525
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2526
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2527
#else
2528
    new_block->host = qemu_vmalloc(size);
2529
#endif
2530
#ifdef MADV_MERGEABLE
2531
    madvise(new_block->host, size, MADV_MERGEABLE);
2532
#endif
2533
    new_block->offset = last_ram_offset;
2534
    new_block->length = size;
2535

    
2536
    new_block->next = ram_blocks;
2537
    ram_blocks = new_block;
2538

    
2539
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2540
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2541
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2542
           0xff, size >> TARGET_PAGE_BITS);
2543

    
2544
    last_ram_offset += size;
2545

    
2546
    if (kvm_enabled())
2547
        kvm_setup_guest_memory(new_block->host, size);
2548

    
2549
    return new_block->offset;
2550
}
2551

    
2552
void qemu_ram_free(ram_addr_t addr)
2553
{
2554
    /* TODO: implement this.  */
2555
}
2556

    
2557
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2558
   With the exception of the softmmu code in this file, this should
2559
   only be used for local memory (e.g. video ram) that the device owns,
2560
   and knows it isn't going to access beyond the end of the block.
2561

2562
   It should not be used for general purpose DMA.
2563
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2564
 */
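
/*
 * Illustrative use, not part of the original file: a display device that
 * allocated its VRAM with qemu_ram_alloc() can obtain a host pointer to
 * it like this (vram_offset and vram_size are placeholders).
 *
 *     uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *     memset(vram, 0, vram_size);
 */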
2565
void *qemu_get_ram_ptr(ram_addr_t addr)
2566
{
2567
    RAMBlock *prev;
2568
    RAMBlock **prevp;
2569
    RAMBlock *block;
2570

    
2571
    prev = NULL;
2572
    prevp = &ram_blocks;
2573
    block = ram_blocks;
2574
    while (block && (block->offset > addr
2575
                     || block->offset + block->length <= addr)) {
2576
        if (prev)
2577
          prevp = &prev->next;
2578
        prev = block;
2579
        block = block->next;
2580
    }
2581
    if (!block) {
2582
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2583
        abort();
2584
    }
2585
    /* Move this entry to the start of the list.  */
2586
    if (prev) {
2587
        prev->next = block->next;
2588
        block->next = *prevp;
2589
        *prevp = block;
2590
    }
2591
    return block->host + (addr - block->offset);
2592
}
2593

    
2594
/* Some of the softmmu routines need to translate from a host pointer
2595
   (typically a TLB entry) back to a ram offset.  */
2596
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2597
{
2598
    RAMBlock *prev;
2599
    RAMBlock *block;
2600
    uint8_t *host = ptr;
2601

    
2602
    prev = NULL;
2603
    block = ram_blocks;
2604
    while (block && (block->host > host
2605
                     || block->host + block->length <= host)) {
2606
        prev = block;
2607
        block = block->next;
2608
    }
2609
    if (!block) {
2610
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2611
        abort();
2612
    }
2613
    return block->offset + (host - block->host);
2614
}
2615

    
2616
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2617
{
2618
#ifdef DEBUG_UNASSIGNED
2619
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2620
#endif
2621
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2622
    do_unassigned_access(addr, 0, 0, 0, 1);
2623
#endif
2624
    return 0;
2625
}
2626

    
2627
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2628
{
2629
#ifdef DEBUG_UNASSIGNED
2630
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2631
#endif
2632
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2633
    do_unassigned_access(addr, 0, 0, 0, 2);
2634
#endif
2635
    return 0;
2636
}
2637

    
2638
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2639
{
2640
#ifdef DEBUG_UNASSIGNED
2641
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2642
#endif
2643
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2644
    do_unassigned_access(addr, 0, 0, 0, 4);
2645
#endif
2646
    return 0;
2647
}
2648

    
2649
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2650
{
2651
#ifdef DEBUG_UNASSIGNED
2652
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2653
#endif
2654
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2655
    do_unassigned_access(addr, 1, 0, 0, 1);
2656
#endif
2657
}
2658

    
2659
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2660
{
2661
#ifdef DEBUG_UNASSIGNED
2662
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2663
#endif
2664
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2665
    do_unassigned_access(addr, 1, 0, 0, 2);
2666
#endif
2667
}
2668

    
2669
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2670
{
2671
#ifdef DEBUG_UNASSIGNED
2672
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2673
#endif
2674
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2675
    do_unassigned_access(addr, 1, 0, 0, 4);
2676
#endif
2677
}
2678

    
2679
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2680
    unassigned_mem_readb,
2681
    unassigned_mem_readw,
2682
    unassigned_mem_readl,
2683
};
2684

    
2685
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2686
    unassigned_mem_writeb,
2687
    unassigned_mem_writew,
2688
    unassigned_mem_writel,
2689
};
2690

    
2691
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2692
                                uint32_t val)
2693
{
2694
    int dirty_flags;
2695
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2696
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2697
#if !defined(CONFIG_USER_ONLY)
2698
        tb_invalidate_phys_page_fast(ram_addr, 1);
2699
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2700
#endif
2701
    }
2702
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2703
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2704
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2705
    /* we remove the notdirty callback only if the code has been
2706
       flushed */
2707
    if (dirty_flags == 0xff)
2708
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2709
}
2710

    
2711
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2712
                                uint32_t val)
2713
{
2714
    int dirty_flags;
2715
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2716
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2717
#if !defined(CONFIG_USER_ONLY)
2718
        tb_invalidate_phys_page_fast(ram_addr, 2);
2719
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2720
#endif
2721
    }
2722
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2723
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2724
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2725
    /* we remove the notdirty callback only if the code has been
2726
       flushed */
2727
    if (dirty_flags == 0xff)
2728
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2729
}
2730

    
2731
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2732
                                uint32_t val)
2733
{
2734
    int dirty_flags;
2735
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2736
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2737
#if !defined(CONFIG_USER_ONLY)
2738
        tb_invalidate_phys_page_fast(ram_addr, 4);
2739
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2740
#endif
2741
    }
2742
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2743
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2744
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2745
    /* we remove the notdirty callback only if the code has been
2746
       flushed */
2747
    if (dirty_flags == 0xff)
2748
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2749
}
2750

    
2751
static CPUReadMemoryFunc * const error_mem_read[3] = {
2752
    NULL, /* never used */
2753
    NULL, /* never used */
2754
    NULL, /* never used */
2755
};
2756

    
2757
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2758
    notdirty_mem_writeb,
2759
    notdirty_mem_writew,
2760
    notdirty_mem_writel,
2761
};
2762

    
2763
/* Generate a debug exception if a watchpoint has been hit.  */
2764
static void check_watchpoint(int offset, int len_mask, int flags)
2765
{
2766
    CPUState *env = cpu_single_env;
2767
    target_ulong pc, cs_base;
2768
    TranslationBlock *tb;
2769
    target_ulong vaddr;
2770
    CPUWatchpoint *wp;
2771
    int cpu_flags;
2772

    
2773
    if (env->watchpoint_hit) {
2774
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2777
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2778
        return;
2779
    }
2780
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2781
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2782
        if ((vaddr == (wp->vaddr & len_mask) ||
2783
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2784
            wp->flags |= BP_WATCHPOINT_HIT;
2785
            if (!env->watchpoint_hit) {
2786
                env->watchpoint_hit = wp;
2787
                tb = tb_find_pc(env->mem_io_pc);
2788
                if (!tb) {
2789
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2790
                              "pc=%p", (void *)env->mem_io_pc);
2791
                }
2792
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2793
                tb_phys_invalidate(tb, -1);
2794
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2795
                    env->exception_index = EXCP_DEBUG;
2796
                } else {
2797
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2798
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2799
                }
2800
                cpu_resume_from_signal(env, NULL);
2801
            }
2802
        } else {
2803
            wp->flags &= ~BP_WATCHPOINT_HIT;
2804
        }
2805
    }
2806
}
2807

    
2808
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2809
   so these check for a hit then pass through to the normal out-of-line
2810
   phys routines.  */
2811
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2812
{
2813
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2814
    return ldub_phys(addr);
2815
}
2816

    
2817
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2818
{
2819
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2820
    return lduw_phys(addr);
2821
}
2822

    
2823
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2824
{
2825
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2826
    return ldl_phys(addr);
2827
}
2828

    
2829
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2830
                             uint32_t val)
2831
{
2832
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2833
    stb_phys(addr, val);
2834
}
2835

    
2836
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2837
                             uint32_t val)
2838
{
2839
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2840
    stw_phys(addr, val);
2841
}
2842

    
2843
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2844
                             uint32_t val)
2845
{
2846
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2847
    stl_phys(addr, val);
2848
}
2849

    
2850
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2851
    watch_mem_readb,
2852
    watch_mem_readw,
2853
    watch_mem_readl,
2854
};
2855

    
2856
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2857
    watch_mem_writeb,
2858
    watch_mem_writew,
2859
    watch_mem_writel,
2860
};
2861

    
2862
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2863
                                 unsigned int len)
2864
{
2865
    uint32_t ret;
2866
    unsigned int idx;
2867

    
2868
    idx = SUBPAGE_IDX(addr);
2869
#if defined(DEBUG_SUBPAGE)
2870
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2871
           mmio, len, addr, idx);
2872
#endif
2873
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2874
                                       addr + mmio->region_offset[idx][0][len]);
2875

    
2876
    return ret;
2877
}
2878

    
2879
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2880
                              uint32_t value, unsigned int len)
2881
{
2882
    unsigned int idx;
2883

    
2884
    idx = SUBPAGE_IDX(addr);
2885
#if defined(DEBUG_SUBPAGE)
2886
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2887
           mmio, len, addr, idx, value);
2888
#endif
2889
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2890
                                  addr + mmio->region_offset[idx][1][len],
2891
                                  value);
2892
}
2893

    
2894
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2895
{
2896
#if defined(DEBUG_SUBPAGE)
2897
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2898
#endif
2899

    
2900
    return subpage_readlen(opaque, addr, 0);
2901
}
2902

    
2903
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2904
                            uint32_t value)
2905
{
2906
#if defined(DEBUG_SUBPAGE)
2907
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2908
#endif
2909
    subpage_writelen(opaque, addr, value, 0);
2910
}
2911

    
2912
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2913
{
2914
#if defined(DEBUG_SUBPAGE)
2915
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2916
#endif
2917

    
2918
    return subpage_readlen(opaque, addr, 1);
2919
}
2920

    
2921
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2922
                            uint32_t value)
2923
{
2924
#if defined(DEBUG_SUBPAGE)
2925
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2926
#endif
2927
    subpage_writelen(opaque, addr, value, 1);
2928
}
2929

    
2930
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2931
{
2932
#if defined(DEBUG_SUBPAGE)
2933
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2934
#endif
2935

    
2936
    return subpage_readlen(opaque, addr, 2);
2937
}
2938

    
2939
static void subpage_writel (void *opaque,
2940
                         target_phys_addr_t addr, uint32_t value)
2941
{
2942
#if defined(DEBUG_SUBPAGE)
2943
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2944
#endif
2945
    subpage_writelen(opaque, addr, value, 2);
2946
}
2947

    
2948
static CPUReadMemoryFunc * const subpage_read[] = {
2949
    &subpage_readb,
2950
    &subpage_readw,
2951
    &subpage_readl,
2952
};
2953

    
2954
static CPUWriteMemoryFunc * const subpage_write[] = {
2955
    &subpage_writeb,
2956
    &subpage_writew,
2957
    &subpage_writel,
2958
};
2959

    
2960
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2961
                             ram_addr_t memory, ram_addr_t region_offset)
2962
{
2963
    int idx, eidx;
2964
    unsigned int i;
2965

    
2966
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2967
        return -1;
2968
    idx = SUBPAGE_IDX(start);
2969
    eidx = SUBPAGE_IDX(end);
2970
#if defined(DEBUG_SUBPAGE)
2971
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2972
           mmio, start, end, idx, eidx, memory);
2973
#endif
2974
    memory >>= IO_MEM_SHIFT;
2975
    for (; idx <= eidx; idx++) {
2976
        for (i = 0; i < 4; i++) {
2977
            if (io_mem_read[memory][i]) {
2978
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2979
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2980
                mmio->region_offset[idx][0][i] = region_offset;
2981
            }
2982
            if (io_mem_write[memory][i]) {
2983
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2984
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2985
                mmio->region_offset[idx][1][i] = region_offset;
2986
            }
2987
        }
2988
    }
2989

    
2990
    return 0;
2991
}
2992

    
2993
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2994
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2995
{
2996
    subpage_t *mmio;
2997
    int subpage_memory;
2998

    
2999
    mmio = qemu_mallocz(sizeof(subpage_t));
3000

    
3001
    mmio->base = base;
3002
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3003
#if defined(DEBUG_SUBPAGE)
3004
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3005
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3006
#endif
3007
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3008
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3009
                         region_offset);
3010

    
3011
    return mmio;
3012
}
3013

    
3014
static int get_free_io_mem_idx(void)
3015
{
3016
    int i;
3017

    
3018
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3019
        if (!io_mem_used[i]) {
3020
            io_mem_used[i] = 1;
3021
            return i;
3022
        }
3023
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
3024
    return -1;
3025
}
3026

    
3027
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
3034
static int cpu_register_io_memory_fixed(int io_index,
3035
                                        CPUReadMemoryFunc * const *mem_read,
3036
                                        CPUWriteMemoryFunc * const *mem_write,
3037
                                        void *opaque)
3038
{
3039
    int i, subwidth = 0;
3040

    
3041
    if (io_index <= 0) {
3042
        io_index = get_free_io_mem_idx();
3043
        if (io_index == -1)
3044
            return io_index;
3045
    } else {
3046
        io_index >>= IO_MEM_SHIFT;
3047
        if (io_index >= IO_MEM_NB_ENTRIES)
3048
            return -1;
3049
    }
3050

    
3051
    for(i = 0;i < 3; i++) {
3052
        if (!mem_read[i] || !mem_write[i])
3053
            subwidth = IO_MEM_SUBWIDTH;
3054
        io_mem_read[io_index][i] = mem_read[i];
3055
        io_mem_write[io_index][i] = mem_write[i];
3056
    }
3057
    io_mem_opaque[io_index] = opaque;
3058
    return (io_index << IO_MEM_SHIFT) | subwidth;
3059
}
3060

    
3061
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
3062
                           CPUWriteMemoryFunc * const *mem_write,
3063
                           void *opaque)
3064
{
3065
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3066
}
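
/*
 * Illustrative sketch, not part of the original file: a device model
 * registers an MMIO region by passing byte/word/long callback tables and
 * then mapping the returned handle; all "mydev_" names and the base/size
 * values are invented for the example.
 *
 *     static CPUReadMemoryFunc * const mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     io_index = cpu_register_io_memory(mydev_read, mydev_write, s);
 *     cpu_register_physical_memory(base, 0x1000, io_index);
 */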
3067

    
3068
void cpu_unregister_io_memory(int io_table_address)
3069
{
3070
    int i;
3071
    int io_index = io_table_address >> IO_MEM_SHIFT;
3072

    
3073
    for (i=0;i < 3; i++) {
3074
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3075
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3076
    }
3077
    io_mem_opaque[io_index] = NULL;
3078
    io_mem_used[io_index] = 0;
3079
}
3080

    
3081
static void io_mem_init(void)
3082
{
3083
    int i;
3084

    
3085
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3086
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3087
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3088
    for (i=0; i<5; i++)
3089
        io_mem_used[i] = 1;
3090

    
3091
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3092
                                          watch_mem_write, NULL);
3093
}
3094

    
3095
#endif /* !defined(CONFIG_USER_ONLY) */
3096

    
3097
/* physical memory access (slow version, mainly for debug) */
3098
#if defined(CONFIG_USER_ONLY)
3099
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3100
                            int len, int is_write)
3101
{
3102
    int l, flags;
3103
    target_ulong page;
3104
    void * p;
3105

    
3106
    while (len > 0) {
3107
        page = addr & TARGET_PAGE_MASK;
3108
        l = (page + TARGET_PAGE_SIZE) - addr;
3109
        if (l > len)
3110
            l = len;
3111
        flags = page_get_flags(page);
3112
        if (!(flags & PAGE_VALID))
3113
            return;
3114
        if (is_write) {
3115
            if (!(flags & PAGE_WRITE))
3116
                return;
3117
            /* XXX: this code should not depend on lock_user */
3118
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3119
                /* FIXME - should this return an error rather than just fail? */
3120
                return;
3121
            memcpy(p, buf, l);
3122
            unlock_user(p, addr, l);
3123
        } else {
3124
            if (!(flags & PAGE_READ))
3125
                return;
3126
            /* XXX: this code should not depend on lock_user */
3127
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3128
                /* FIXME - should this return an error rather than just fail? */
3129
                return;
3130
            memcpy(buf, p, l);
3131
            unlock_user(p, addr, 0);
3132
        }
3133
        len -= l;
3134
        buf += l;
3135
        addr += l;
3136
    }
3137
}
3138

    
3139
#else
3140
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3141
                            int len, int is_write)
3142
{
3143
    int l, io_index;
3144
    uint8_t *ptr;
3145
    uint32_t val;
3146
    target_phys_addr_t page;
3147
    unsigned long pd;
3148
    PhysPageDesc *p;
3149

    
3150
    while (len > 0) {
3151
        page = addr & TARGET_PAGE_MASK;
3152
        l = (page + TARGET_PAGE_SIZE) - addr;
3153
        if (l > len)
3154
            l = len;
3155
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3156
        if (!p) {
3157
            pd = IO_MEM_UNASSIGNED;
3158
        } else {
3159
            pd = p->phys_offset;
3160
        }
3161

    
3162
        if (is_write) {
3163
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3164
                target_phys_addr_t addr1 = addr;
3165
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3166
                if (p)
3167
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3168
                /* XXX: could force cpu_single_env to NULL to avoid
3169
                   potential bugs */
3170
                if (l >= 4 && ((addr1 & 3) == 0)) {
3171
                    /* 32 bit write access */
3172
                    val = ldl_p(buf);
3173
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3174
                    l = 4;
3175
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3176
                    /* 16 bit write access */
3177
                    val = lduw_p(buf);
3178
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3179
                    l = 2;
3180
                } else {
3181
                    /* 8 bit write access */
3182
                    val = ldub_p(buf);
3183
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3184
                    l = 1;
3185
                }
3186
            } else {
3187
                unsigned long addr1;
3188
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3189
                /* RAM case */
3190
                ptr = qemu_get_ram_ptr(addr1);
3191
                memcpy(ptr, buf, l);
3192
                if (!cpu_physical_memory_is_dirty(addr1)) {
3193
                    /* invalidate code */
3194
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3195
                    /* set dirty bit */
3196
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3197
                        (0xff & ~CODE_DIRTY_FLAG);
3198
                }
3199
            }
3200
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

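/* Editor's illustrative sketch (not part of the original file, kept out of
   the build): how a hypothetical device model might use
   cpu_physical_memory_rw() for a DMA-style transfer.  The function and
   parameter names below are invented for the example. */
#if 0
static void example_dma_transfer(target_phys_addr_t guest_addr,
                                 uint8_t *host_buf, int size, int to_guest)
{
    /* to_guest != 0 copies host_buf into guest physical memory, otherwise
       guest physical memory is copied into host_buf.  The helper splits the
       transfer at page boundaries and dispatches each piece either to RAM
       (plain memcpy plus dirty/code tracking) or to the registered MMIO
       handlers. */
    cpu_physical_memory_rw(guest_addr, host_buf, size, to_guest);
}
#endif
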
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

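/* Editor's illustrative sketch (not part of the original file, kept out of
   the build): a hypothetical firmware loader.  Unlike a normal write through
   cpu_physical_memory_rw(), cpu_physical_memory_write_rom() also stores into
   pages registered as ROM; accesses that hit neither RAM nor ROM are simply
   skipped.  The names below are invented for the example. */
#if 0
static void example_load_firmware(target_phys_addr_t rom_base,
                                  const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(rom_base, blob, blob_size);
}
#endif
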
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

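/* Editor's illustrative sketch (not part of the original file, kept out of
   the build): the intended calling pattern for cpu_physical_memory_map() and
   cpu_physical_memory_unmap().  When the region is not plain RAM the single
   bounce buffer is used, so the map can return NULL (or less than requested)
   while another mapping is outstanding; cpu_register_map_client() arranges a
   retry callback once cpu_notify_map_clients() runs.  ExampleFillReq and
   example_fill_guest are invented for the example, and for brevity only the
   prefix that was actually mapped is filled. */
#if 0
typedef struct ExampleFillReq {
    target_phys_addr_t addr;
    target_phys_addr_t len;
    uint8_t pattern;
} ExampleFillReq;

static void example_fill_guest(void *opaque)
{
    ExampleFillReq *req = opaque;
    target_phys_addr_t plen = req->len;
    uint8_t *host;

    host = cpu_physical_memory_map(req->addr, &plen, 1 /* is_write */);
    if (!host) {
        /* Bounce buffer busy: retry when cpu_notify_map_clients() fires. */
        cpu_register_map_client(req, example_fill_guest);
        return;
    }
    memset(host, req->pattern, plen);
    cpu_physical_memory_unmap(host, plen, 1 /* is_write */, plen);
}
#endif
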
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

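/* Editor's illustrative sketch (not part of the original file, kept out of
   the build): the kind of caller the *_notdirty stores are meant for.  A
   software page-table walker that sets an "accessed" bit in a guest PTE can
   use stl_phys_notdirty() so that this bookkeeping store does not mark the
   page dirty or invalidate translated code the way stl_phys() would, which
   matters when the dirty bits themselves are being used to detect modified
   PTEs.  EXAMPLE_PTE_ACCESSED and the function name are invented for the
   example. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20   /* hypothetical "accessed" bit */

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        pte |= EXAMPLE_PTE_ACCESSED;
        stl_phys_notdirty(pte_addr, pte);
    }
}
#endif
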
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

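/* Editor's illustrative sketch (not part of the original file, kept out of
   the build): a debugger-style read of guest virtual memory, the way a gdb
   stub or monitor command would use cpu_memory_rw_debug().  The function
   name is invented for the example. */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *out, int size)
{
    /* Translation is done page by page with cpu_get_phys_page_debug();
       returns -1 if any page in the range is unmapped, 0 on success. */
    return cpu_memory_rw_debug(env, vaddr, out, size, 0 /* read */);
}
#endif
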
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

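/* Editor's illustrative sketch (not part of the original file, kept out of
   the build): dumping the translation statistics collected above to stderr.
   fprintf() already has the shape expected for the cpu_fprintf callback. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
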
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif