Statistics
| Branch: | Revision:

root / exec.c @ 660f11be

History | View | Annotate | Download (111.4 kB)

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36
#include "qemu-common.h"
37
#include "tcg.h"
38
#include "hw/hw.h"
39
#include "osdep.h"
40
#include "kvm.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#endif
44

    
45
//#define DEBUG_TB_INVALIDATE
46
//#define DEBUG_FLUSH
47
//#define DEBUG_TLB
48
//#define DEBUG_UNASSIGNED
49

    
50
/* make various TB consistency checks */
51
//#define DEBUG_TB_CHECK
52
//#define DEBUG_TLB_CHECK
53

    
54
//#define DEBUG_IOPORT
55
//#define DEBUG_SUBPAGE
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* TB consistency checks only implemented for usermode emulation.  */
59
#undef DEBUG_TB_CHECK
60
#endif
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#if defined(TARGET_SPARC64)
65
#define TARGET_PHYS_ADDR_SPACE_BITS 41
66
#elif defined(TARGET_SPARC)
67
#define TARGET_PHYS_ADDR_SPACE_BITS 36
68
#elif defined(TARGET_ALPHA)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 42
70
#define TARGET_VIRT_ADDR_SPACE_BITS 42
71
#elif defined(TARGET_PPC64)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 36
77
#else
78
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
79
#define TARGET_PHYS_ADDR_SPACE_BITS 32
80
#endif
81

    
82
static TranslationBlock *tbs;
83
int code_gen_max_blocks;
84
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85
static int nb_tbs;
86
/* any access to the tbs or the page table must use this lock */
87
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88

    
89
#if defined(__arm__) || defined(__sparc_v9__)
90
/* The prologue must be reachable with a direct jump. ARM and Sparc64
91
 have limited branch ranges (possibly also PPC) so place it in a
92
 section close to code segment. */
93
#define code_gen_section                                \
94
    __attribute__((__section__(".gen_code")))           \
95
    __attribute__((aligned (32)))
96
#elif defined(_WIN32)
97
/* Maximum alignment for Win32 is 16. */
98
#define code_gen_section                                \
99
    __attribute__((aligned (16)))
100
#else
101
#define code_gen_section                                \
102
    __attribute__((aligned (32)))
103
#endif
104

    
105
uint8_t code_gen_prologue[1024] code_gen_section;
106
static uint8_t *code_gen_buffer;
107
static unsigned long code_gen_buffer_size;
108
/* threshold to flush the translated code buffer */
109
static unsigned long code_gen_buffer_max_size;
110
uint8_t *code_gen_ptr;
111

    
112
#if !defined(CONFIG_USER_ONLY)
113
int phys_ram_fd;
114
uint8_t *phys_ram_dirty;
115
static int in_migration;
116

    
117
typedef struct RAMBlock {
118
    uint8_t *host;
119
    ram_addr_t offset;
120
    ram_addr_t length;
121
    struct RAMBlock *next;
122
} RAMBlock;
123

    
124
static RAMBlock *ram_blocks;
125
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126
   then we can no longer assume contiguous ram offsets, and external uses
127
   of this variable will break.  */
128
ram_addr_t last_ram_offset;
129
#endif
130

    
131
CPUState *first_cpu;
132
/* current CPU in the current thread. It is only valid inside
133
   cpu_exec() */
134
CPUState *cpu_single_env;
135
/* 0 = Do not count executed instructions.
136
   1 = Precise instruction counting.
137
   2 = Adaptive rate instruction counting.  */
138
int use_icount = 0;
139
/* Current instruction counter.  While executing translated code this may
140
   include some instructions that have not yet been executed.  */
141
int64_t qemu_icount;
142

    
143
typedef struct PageDesc {
144
    /* list of TBs intersecting this ram page */
145
    TranslationBlock *first_tb;
146
    /* in order to optimize self modifying code, we count the number
147
       of lookups we do to a given page to use a bitmap */
148
    unsigned int code_write_count;
149
    uint8_t *code_bitmap;
150
#if defined(CONFIG_USER_ONLY)
151
    unsigned long flags;
152
#endif
153
} PageDesc;
154

    
155
typedef struct PhysPageDesc {
156
    /* offset in host memory of the page + io_index in the low bits */
157
    ram_addr_t phys_offset;
158
    ram_addr_t region_offset;
159
} PhysPageDesc;
160

    
161
#define L2_BITS 10
162
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163
/* XXX: this is a temporary hack for alpha target.
164
 *      In the future, this is to be replaced by a multi-level table
165
 *      to actually be able to handle the complete 64 bits address space.
166
 */
167
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
168
#else
169
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170
#endif
171

    
172
#define L1_SIZE (1 << L1_BITS)
173
#define L2_SIZE (1 << L2_BITS)
174

    
175
unsigned long qemu_real_host_page_size;
176
unsigned long qemu_host_page_bits;
177
unsigned long qemu_host_page_size;
178
unsigned long qemu_host_page_mask;
179

    
180
/* XXX: for system emulation, it could just be an array */
181
static PageDesc *l1_map[L1_SIZE];
182
static PhysPageDesc **l1_phys_map;
183

    
184
#if !defined(CONFIG_USER_ONLY)
185
static void io_mem_init(void);
186

    
187
/* io memory support */
188
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
189
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
190
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
191
static char io_mem_used[IO_MEM_NB_ENTRIES];
192
static int io_mem_watch;
193
#endif
194

    
195
/* log support */
196
static const char *logfilename = "/tmp/qemu.log";
197
FILE *logfile;
198
int loglevel;
199
static int log_append = 0;
200

    
201
/* statistics */
202
static int tlb_flush_count;
203
static int tb_flush_count;
204
static int tb_phys_invalidate_count;
205

    
206
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
207
typedef struct subpage_t {
208
    target_phys_addr_t base;
209
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
210
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
211
    void *opaque[TARGET_PAGE_SIZE][2][4];
212
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
213
} subpage_t;
214

    
215
#ifdef _WIN32
216
static void map_exec(void *addr, long size)
217
{
218
    DWORD old_protect;
219
    VirtualProtect(addr, size,
220
                   PAGE_EXECUTE_READWRITE, &old_protect);
221
    
222
}
223
#else
224
static void map_exec(void *addr, long size)
225
{
226
    unsigned long start, end, page_size;
227
    
228
    page_size = getpagesize();
229
    start = (unsigned long)addr;
230
    start &= ~(page_size - 1);
231
    
232
    end = (unsigned long)addr + size;
233
    end += page_size - 1;
234
    end &= ~(page_size - 1);
235
    
236
    mprotect((void *)start, end - start,
237
             PROT_READ | PROT_WRITE | PROT_EXEC);
238
}
239
#endif
240

    
241
static void page_init(void)
242
{
243
    /* NOTE: we can always suppose that qemu_host_page_size >=
244
       TARGET_PAGE_SIZE */
245
#ifdef _WIN32
246
    {
247
        SYSTEM_INFO system_info;
248

    
249
        GetSystemInfo(&system_info);
250
        qemu_real_host_page_size = system_info.dwPageSize;
251
    }
252
#else
253
    qemu_real_host_page_size = getpagesize();
254
#endif
255
    if (qemu_host_page_size == 0)
256
        qemu_host_page_size = qemu_real_host_page_size;
257
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
258
        qemu_host_page_size = TARGET_PAGE_SIZE;
259
    qemu_host_page_bits = 0;
260
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
261
        qemu_host_page_bits++;
262
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
263
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
264
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
265

    
266
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
267
    {
268
        long long startaddr, endaddr;
269
        FILE *f;
270
        int n;
271

    
272
        mmap_lock();
273
        last_brk = (unsigned long)sbrk(0);
274
        f = fopen("/proc/self/maps", "r");
275
        if (f) {
276
            do {
277
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
278
                if (n == 2) {
279
                    startaddr = MIN(startaddr,
280
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
281
                    endaddr = MIN(endaddr,
282
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
283
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
284
                                   TARGET_PAGE_ALIGN(endaddr),
285
                                   PAGE_RESERVED); 
286
                }
287
            } while (!feof(f));
288
            fclose(f);
289
        }
290
        mmap_unlock();
291
    }
292
#endif
293
}
294

    
295
static inline PageDesc **page_l1_map(target_ulong index)
296
{
297
#if TARGET_LONG_BITS > 32
298
    /* Host memory outside guest VM.  For 32-bit targets we have already
299
       excluded high addresses.  */
300
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
301
        return NULL;
302
#endif
303
    return &l1_map[index >> L2_BITS];
304
}
305

    
306
static inline PageDesc *page_find_alloc(target_ulong index)
307
{
308
    PageDesc **lp, *p;
309
    lp = page_l1_map(index);
310
    if (!lp)
311
        return NULL;
312

    
313
    p = *lp;
314
    if (!p) {
315
        /* allocate if not found */
316
#if defined(CONFIG_USER_ONLY)
317
        size_t len = sizeof(PageDesc) * L2_SIZE;
318
        /* Don't use qemu_malloc because it may recurse.  */
319
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
320
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
321
        *lp = p;
322
        if (h2g_valid(p)) {
323
            unsigned long addr = h2g(p);
324
            page_set_flags(addr & TARGET_PAGE_MASK,
325
                           TARGET_PAGE_ALIGN(addr + len),
326
                           PAGE_RESERVED); 
327
        }
328
#else
329
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
330
        *lp = p;
331
#endif
332
    }
333
    return p + (index & (L2_SIZE - 1));
334
}
335

    
336
static inline PageDesc *page_find(target_ulong index)
337
{
338
    PageDesc **lp, *p;
339
    lp = page_l1_map(index);
340
    if (!lp)
341
        return NULL;
342

    
343
    p = *lp;
344
    if (!p) {
345
        return NULL;
346
    }
347
    return p + (index & (L2_SIZE - 1));
348
}
349

    
350
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
351
{
352
    void **lp, **p;
353
    PhysPageDesc *pd;
354

    
355
    p = (void **)l1_phys_map;
356
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
357

    
358
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
359
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
360
#endif
361
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
362
    p = *lp;
363
    if (!p) {
364
        /* allocate if not found */
365
        if (!alloc)
366
            return NULL;
367
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
368
        memset(p, 0, sizeof(void *) * L1_SIZE);
369
        *lp = p;
370
    }
371
#endif
372
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
373
    pd = *lp;
374
    if (!pd) {
375
        int i;
376
        /* allocate if not found */
377
        if (!alloc)
378
            return NULL;
379
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
380
        *lp = pd;
381
        for (i = 0; i < L2_SIZE; i++) {
382
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
383
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
384
        }
385
    }
386
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
387
}
388

    
389
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
390
{
391
    return phys_page_find_alloc(index, 0);
392
}
393

    
394
#if !defined(CONFIG_USER_ONLY)
395
static void tlb_protect_code(ram_addr_t ram_addr);
396
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
397
                                    target_ulong vaddr);
398
#define mmap_lock() do { } while(0)
399
#define mmap_unlock() do { } while(0)
400
#endif
401

    
402
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
403

    
404
#if defined(CONFIG_USER_ONLY)
405
/* Currently it is not recommended to allocate big chunks of data in
406
   user mode. It will change when a dedicated libc will be used */
407
#define USE_STATIC_CODE_GEN_BUFFER
408
#endif
409

    
410
#ifdef USE_STATIC_CODE_GEN_BUFFER
411
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
412
#endif
413

    
414
static void code_gen_alloc(unsigned long tb_size)
415
{
416
#ifdef USE_STATIC_CODE_GEN_BUFFER
417
    code_gen_buffer = static_code_gen_buffer;
418
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
419
    map_exec(code_gen_buffer, code_gen_buffer_size);
420
#else
421
    code_gen_buffer_size = tb_size;
422
    if (code_gen_buffer_size == 0) {
423
#if defined(CONFIG_USER_ONLY)
424
        /* in user mode, phys_ram_size is not meaningful */
425
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
426
#else
427
        /* XXX: needs adjustments */
428
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
429
#endif
430
    }
431
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
432
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
433
    /* The code gen buffer location may have constraints depending on
434
       the host cpu and OS */
435
#if defined(__linux__) 
436
    {
437
        int flags;
438
        void *start = NULL;
439

    
440
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
441
#if defined(__x86_64__)
442
        flags |= MAP_32BIT;
443
        /* Cannot map more than that */
444
        if (code_gen_buffer_size > (800 * 1024 * 1024))
445
            code_gen_buffer_size = (800 * 1024 * 1024);
446
#elif defined(__sparc_v9__)
447
        // Map the buffer below 2G, so we can use direct calls and branches
448
        flags |= MAP_FIXED;
449
        start = (void *) 0x60000000UL;
450
        if (code_gen_buffer_size > (512 * 1024 * 1024))
451
            code_gen_buffer_size = (512 * 1024 * 1024);
452
#elif defined(__arm__)
453
        /* Map the buffer below 32M, so we can use direct calls and branches */
454
        flags |= MAP_FIXED;
455
        start = (void *) 0x01000000UL;
456
        if (code_gen_buffer_size > 16 * 1024 * 1024)
457
            code_gen_buffer_size = 16 * 1024 * 1024;
458
#endif
459
        code_gen_buffer = mmap(start, code_gen_buffer_size,
460
                               PROT_WRITE | PROT_READ | PROT_EXEC,
461
                               flags, -1, 0);
462
        if (code_gen_buffer == MAP_FAILED) {
463
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
464
            exit(1);
465
        }
466
    }
467
#elif defined(__FreeBSD__) || defined(__DragonFly__)
468
    {
469
        int flags;
470
        void *addr = NULL;
471
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
472
#if defined(__x86_64__)
473
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
474
         * 0x40000000 is free */
475
        flags |= MAP_FIXED;
476
        addr = (void *)0x40000000;
477
        /* Cannot map more than that */
478
        if (code_gen_buffer_size > (800 * 1024 * 1024))
479
            code_gen_buffer_size = (800 * 1024 * 1024);
480
#endif
481
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
482
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
483
                               flags, -1, 0);
484
        if (code_gen_buffer == MAP_FAILED) {
485
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
486
            exit(1);
487
        }
488
    }
489
#else
490
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
491
    map_exec(code_gen_buffer, code_gen_buffer_size);
492
#endif
493
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
494
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
495
    code_gen_buffer_max_size = code_gen_buffer_size - 
496
        code_gen_max_block_size();
497
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
498
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
499
}
500

    
501
/* Must be called before using the QEMU cpus. 'tb_size' is the size
502
   (in bytes) allocated to the translation buffer. Zero means default
503
   size. */
504
void cpu_exec_init_all(unsigned long tb_size)
505
{
506
    cpu_gen_init();
507
    code_gen_alloc(tb_size);
508
    code_gen_ptr = code_gen_buffer;
509
    page_init();
510
#if !defined(CONFIG_USER_ONLY)
511
    io_mem_init();
512
#endif
513
}
514

    
515
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
516

    
517
#define CPU_COMMON_SAVE_VERSION 1
518

    
519
static void cpu_common_save(QEMUFile *f, void *opaque)
520
{
521
    CPUState *env = opaque;
522

    
523
    cpu_synchronize_state(env, 0);
524

    
525
    qemu_put_be32s(f, &env->halted);
526
    qemu_put_be32s(f, &env->interrupt_request);
527
}
528

    
529
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
530
{
531
    CPUState *env = opaque;
532

    
533
    if (version_id != CPU_COMMON_SAVE_VERSION)
534
        return -EINVAL;
535

    
536
    qemu_get_be32s(f, &env->halted);
537
    qemu_get_be32s(f, &env->interrupt_request);
538
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
539
       version_id is increased. */
540
    env->interrupt_request &= ~0x01;
541
    tlb_flush(env, 1);
542
    cpu_synchronize_state(env, 1);
543

    
544
    return 0;
545
}
546
#endif
547

    
548
CPUState *qemu_get_cpu(int cpu)
549
{
550
    CPUState *env = first_cpu;
551

    
552
    while (env) {
553
        if (env->cpu_index == cpu)
554
            break;
555
        env = env->next_cpu;
556
    }
557

    
558
    return env;
559
}
560

    
561
void cpu_exec_init(CPUState *env)
562
{
563
    CPUState **penv;
564
    int cpu_index;
565

    
566
#if defined(CONFIG_USER_ONLY)
567
    cpu_list_lock();
568
#endif
569
    env->next_cpu = NULL;
570
    penv = &first_cpu;
571
    cpu_index = 0;
572
    while (*penv != NULL) {
573
        penv = &(*penv)->next_cpu;
574
        cpu_index++;
575
    }
576
    env->cpu_index = cpu_index;
577
    env->numa_node = 0;
578
    TAILQ_INIT(&env->breakpoints);
579
    TAILQ_INIT(&env->watchpoints);
580
    *penv = env;
581
#if defined(CONFIG_USER_ONLY)
582
    cpu_list_unlock();
583
#endif
584
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
585
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
586
                    cpu_common_save, cpu_common_load, env);
587
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
588
                    cpu_save, cpu_load, env);
589
#endif
590
}
591

    
592
static inline void invalidate_page_bitmap(PageDesc *p)
593
{
594
    if (p->code_bitmap) {
595
        qemu_free(p->code_bitmap);
596
        p->code_bitmap = NULL;
597
    }
598
    p->code_write_count = 0;
599
}
600

    
601
/* set to NULL all the 'first_tb' fields in all PageDescs */
602
static void page_flush_tb(void)
603
{
604
    int i, j;
605
    PageDesc *p;
606

    
607
    for(i = 0; i < L1_SIZE; i++) {
608
        p = l1_map[i];
609
        if (p) {
610
            for(j = 0; j < L2_SIZE; j++) {
611
                p->first_tb = NULL;
612
                invalidate_page_bitmap(p);
613
                p++;
614
            }
615
        }
616
    }
617
}
618

    
619
/* flush all the translation blocks */
620
/* XXX: tb_flush is currently not thread safe */
621
void tb_flush(CPUState *env1)
622
{
623
    CPUState *env;
624
#if defined(DEBUG_FLUSH)
625
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
626
           (unsigned long)(code_gen_ptr - code_gen_buffer),
627
           nb_tbs, nb_tbs > 0 ?
628
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
629
#endif
630
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
631
        cpu_abort(env1, "Internal error: code buffer overflow\n");
632

    
633
    nb_tbs = 0;
634

    
635
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
636
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
637
    }
638

    
639
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
640
    page_flush_tb();
641

    
642
    code_gen_ptr = code_gen_buffer;
643
    /* XXX: flush processor icache at this point if cache flush is
644
       expensive */
645
    tb_flush_count++;
646
}
647

    
648
#ifdef DEBUG_TB_CHECK
649

    
650
static void tb_invalidate_check(target_ulong address)
651
{
652
    TranslationBlock *tb;
653
    int i;
654
    address &= TARGET_PAGE_MASK;
655
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
656
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
657
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
658
                  address >= tb->pc + tb->size)) {
659
                printf("ERROR invalidate: address=" TARGET_FMT_lx
660
                       " PC=%08lx size=%04x\n",
661
                       address, (long)tb->pc, tb->size);
662
            }
663
        }
664
    }
665
}
666

    
667
/* verify that all the pages have correct rights for code */
668
static void tb_page_check(void)
669
{
670
    TranslationBlock *tb;
671
    int i, flags1, flags2;
672

    
673
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
674
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
675
            flags1 = page_get_flags(tb->pc);
676
            flags2 = page_get_flags(tb->pc + tb->size - 1);
677
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
678
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
679
                       (long)tb->pc, tb->size, flags1, flags2);
680
            }
681
        }
682
    }
683
}
684

    
685
#endif
686

    
687
/* invalidate one TB */
688
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
689
                             int next_offset)
690
{
691
    TranslationBlock *tb1;
692
    for(;;) {
693
        tb1 = *ptb;
694
        if (tb1 == tb) {
695
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
696
            break;
697
        }
698
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
699
    }
700
}
701

    
702
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
703
{
704
    TranslationBlock *tb1;
705
    unsigned int n1;
706

    
707
    for(;;) {
708
        tb1 = *ptb;
709
        n1 = (long)tb1 & 3;
710
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
711
        if (tb1 == tb) {
712
            *ptb = tb1->page_next[n1];
713
            break;
714
        }
715
        ptb = &tb1->page_next[n1];
716
    }
717
}
718

    
719
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
720
{
721
    TranslationBlock *tb1, **ptb;
722
    unsigned int n1;
723

    
724
    ptb = &tb->jmp_next[n];
725
    tb1 = *ptb;
726
    if (tb1) {
727
        /* find tb(n) in circular list */
728
        for(;;) {
729
            tb1 = *ptb;
730
            n1 = (long)tb1 & 3;
731
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
732
            if (n1 == n && tb1 == tb)
733
                break;
734
            if (n1 == 2) {
735
                ptb = &tb1->jmp_first;
736
            } else {
737
                ptb = &tb1->jmp_next[n1];
738
            }
739
        }
740
        /* now we can suppress tb(n) from the list */
741
        *ptb = tb->jmp_next[n];
742

    
743
        tb->jmp_next[n] = NULL;
744
    }
745
}
746

    
747
/* reset the jump entry 'n' of a TB so that it is not chained to
748
   another TB */
749
static inline void tb_reset_jump(TranslationBlock *tb, int n)
750
{
751
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
752
}
753

    
754
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
755
{
756
    CPUState *env;
757
    PageDesc *p;
758
    unsigned int h, n1;
759
    target_phys_addr_t phys_pc;
760
    TranslationBlock *tb1, *tb2;
761

    
762
    /* remove the TB from the hash list */
763
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
764
    h = tb_phys_hash_func(phys_pc);
765
    tb_remove(&tb_phys_hash[h], tb,
766
              offsetof(TranslationBlock, phys_hash_next));
767

    
768
    /* remove the TB from the page list */
769
    if (tb->page_addr[0] != page_addr) {
770
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
771
        tb_page_remove(&p->first_tb, tb);
772
        invalidate_page_bitmap(p);
773
    }
774
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
775
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
776
        tb_page_remove(&p->first_tb, tb);
777
        invalidate_page_bitmap(p);
778
    }
779

    
780
    tb_invalidated_flag = 1;
781

    
782
    /* remove the TB from the hash list */
783
    h = tb_jmp_cache_hash_func(tb->pc);
784
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
785
        if (env->tb_jmp_cache[h] == tb)
786
            env->tb_jmp_cache[h] = NULL;
787
    }
788

    
789
    /* suppress this TB from the two jump lists */
790
    tb_jmp_remove(tb, 0);
791
    tb_jmp_remove(tb, 1);
792

    
793
    /* suppress any remaining jumps to this TB */
794
    tb1 = tb->jmp_first;
795
    for(;;) {
796
        n1 = (long)tb1 & 3;
797
        if (n1 == 2)
798
            break;
799
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
800
        tb2 = tb1->jmp_next[n1];
801
        tb_reset_jump(tb1, n1);
802
        tb1->jmp_next[n1] = NULL;
803
        tb1 = tb2;
804
    }
805
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
806

    
807
    tb_phys_invalidate_count++;
808
}
809

    
810
static inline void set_bits(uint8_t *tab, int start, int len)
811
{
812
    int end, mask, end1;
813

    
814
    end = start + len;
815
    tab += start >> 3;
816
    mask = 0xff << (start & 7);
817
    if ((start & ~7) == (end & ~7)) {
818
        if (start < end) {
819
            mask &= ~(0xff << (end & 7));
820
            *tab |= mask;
821
        }
822
    } else {
823
        *tab++ |= mask;
824
        start = (start + 8) & ~7;
825
        end1 = end & ~7;
826
        while (start < end1) {
827
            *tab++ = 0xff;
828
            start += 8;
829
        }
830
        if (start < end) {
831
            mask = ~(0xff << (end & 7));
832
            *tab |= mask;
833
        }
834
    }
835
}
836

    
837
static void build_page_bitmap(PageDesc *p)
838
{
839
    int n, tb_start, tb_end;
840
    TranslationBlock *tb;
841

    
842
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
843

    
844
    tb = p->first_tb;
845
    while (tb != NULL) {
846
        n = (long)tb & 3;
847
        tb = (TranslationBlock *)((long)tb & ~3);
848
        /* NOTE: this is subtle as a TB may span two physical pages */
849
        if (n == 0) {
850
            /* NOTE: tb_end may be after the end of the page, but
851
               it is not a problem */
852
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
853
            tb_end = tb_start + tb->size;
854
            if (tb_end > TARGET_PAGE_SIZE)
855
                tb_end = TARGET_PAGE_SIZE;
856
        } else {
857
            tb_start = 0;
858
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
859
        }
860
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
861
        tb = tb->page_next[n];
862
    }
863
}
864

    
865
TranslationBlock *tb_gen_code(CPUState *env,
866
                              target_ulong pc, target_ulong cs_base,
867
                              int flags, int cflags)
868
{
869
    TranslationBlock *tb;
870
    uint8_t *tc_ptr;
871
    target_ulong phys_pc, phys_page2, virt_page2;
872
    int code_gen_size;
873

    
874
    phys_pc = get_phys_addr_code(env, pc);
875
    tb = tb_alloc(pc);
876
    if (!tb) {
877
        /* flush must be done */
878
        tb_flush(env);
879
        /* cannot fail at this point */
880
        tb = tb_alloc(pc);
881
        /* Don't forget to invalidate previous TB info.  */
882
        tb_invalidated_flag = 1;
883
    }
884
    tc_ptr = code_gen_ptr;
885
    tb->tc_ptr = tc_ptr;
886
    tb->cs_base = cs_base;
887
    tb->flags = flags;
888
    tb->cflags = cflags;
889
    cpu_gen_code(env, tb, &code_gen_size);
890
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
891

    
892
    /* check next page if needed */
893
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
894
    phys_page2 = -1;
895
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
896
        phys_page2 = get_phys_addr_code(env, virt_page2);
897
    }
898
    tb_link_phys(tb, phys_pc, phys_page2);
899
    return tb;
900
}
901

    
902
/* invalidate all TBs which intersect with the target physical page
903
   starting in range [start;end[. NOTE: start and end must refer to
904
   the same physical page. 'is_cpu_write_access' should be true if called
905
   from a real cpu write access: the virtual CPU will exit the current
906
   TB if code is modified inside this TB. */
907
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
908
                                   int is_cpu_write_access)
909
{
910
    TranslationBlock *tb, *tb_next, *saved_tb;
911
    CPUState *env = cpu_single_env;
912
    target_ulong tb_start, tb_end;
913
    PageDesc *p;
914
    int n;
915
#ifdef TARGET_HAS_PRECISE_SMC
916
    int current_tb_not_found = is_cpu_write_access;
917
    TranslationBlock *current_tb = NULL;
918
    int current_tb_modified = 0;
919
    target_ulong current_pc = 0;
920
    target_ulong current_cs_base = 0;
921
    int current_flags = 0;
922
#endif /* TARGET_HAS_PRECISE_SMC */
923

    
924
    p = page_find(start >> TARGET_PAGE_BITS);
925
    if (!p)
926
        return;
927
    if (!p->code_bitmap &&
928
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
929
        is_cpu_write_access) {
930
        /* build code bitmap */
931
        build_page_bitmap(p);
932
    }
933

    
934
    /* we remove all the TBs in the range [start, end[ */
935
    /* XXX: see if in some cases it could be faster to invalidate all the code */
936
    tb = p->first_tb;
937
    while (tb != NULL) {
938
        n = (long)tb & 3;
939
        tb = (TranslationBlock *)((long)tb & ~3);
940
        tb_next = tb->page_next[n];
941
        /* NOTE: this is subtle as a TB may span two physical pages */
942
        if (n == 0) {
943
            /* NOTE: tb_end may be after the end of the page, but
944
               it is not a problem */
945
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
946
            tb_end = tb_start + tb->size;
947
        } else {
948
            tb_start = tb->page_addr[1];
949
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
950
        }
951
        if (!(tb_end <= start || tb_start >= end)) {
952
#ifdef TARGET_HAS_PRECISE_SMC
953
            if (current_tb_not_found) {
954
                current_tb_not_found = 0;
955
                current_tb = NULL;
956
                if (env->mem_io_pc) {
957
                    /* now we have a real cpu fault */
958
                    current_tb = tb_find_pc(env->mem_io_pc);
959
                }
960
            }
961
            if (current_tb == tb &&
962
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
963
                /* If we are modifying the current TB, we must stop
964
                its execution. We could be more precise by checking
965
                that the modification is after the current PC, but it
966
                would require a specialized function to partially
967
                restore the CPU state */
968

    
969
                current_tb_modified = 1;
970
                cpu_restore_state(current_tb, env,
971
                                  env->mem_io_pc, NULL);
972
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
973
                                     &current_flags);
974
            }
975
#endif /* TARGET_HAS_PRECISE_SMC */
976
            /* we need to do that to handle the case where a signal
977
               occurs while doing tb_phys_invalidate() */
978
            saved_tb = NULL;
979
            if (env) {
980
                saved_tb = env->current_tb;
981
                env->current_tb = NULL;
982
            }
983
            tb_phys_invalidate(tb, -1);
984
            if (env) {
985
                env->current_tb = saved_tb;
986
                if (env->interrupt_request && env->current_tb)
987
                    cpu_interrupt(env, env->interrupt_request);
988
            }
989
        }
990
        tb = tb_next;
991
    }
992
#if !defined(CONFIG_USER_ONLY)
993
    /* if no code remaining, no need to continue to use slow writes */
994
    if (!p->first_tb) {
995
        invalidate_page_bitmap(p);
996
        if (is_cpu_write_access) {
997
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
998
        }
999
    }
1000
#endif
1001
#ifdef TARGET_HAS_PRECISE_SMC
1002
    if (current_tb_modified) {
1003
        /* we generate a block containing just the instruction
1004
           modifying the memory. It will ensure that it cannot modify
1005
           itself */
1006
        env->current_tb = NULL;
1007
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1008
        cpu_resume_from_signal(env, NULL);
1009
    }
1010
#endif
1011
}
1012

    
1013
/* len must be <= 8 and start must be a multiple of len */
1014
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1015
{
1016
    PageDesc *p;
1017
    int offset, b;
1018
#if 0
1019
    if (1) {
1020
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1021
                  cpu_single_env->mem_io_vaddr, len,
1022
                  cpu_single_env->eip,
1023
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1024
    }
1025
#endif
1026
    p = page_find(start >> TARGET_PAGE_BITS);
1027
    if (!p)
1028
        return;
1029
    if (p->code_bitmap) {
1030
        offset = start & ~TARGET_PAGE_MASK;
1031
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1032
        if (b & ((1 << len) - 1))
1033
            goto do_invalidate;
1034
    } else {
1035
    do_invalidate:
1036
        tb_invalidate_phys_page_range(start, start + len, 1);
1037
    }
1038
}
1039

    
1040
#if !defined(CONFIG_SOFTMMU)
1041
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1042
                                    unsigned long pc, void *puc)
1043
{
1044
    TranslationBlock *tb;
1045
    PageDesc *p;
1046
    int n;
1047
#ifdef TARGET_HAS_PRECISE_SMC
1048
    TranslationBlock *current_tb = NULL;
1049
    CPUState *env = cpu_single_env;
1050
    int current_tb_modified = 0;
1051
    target_ulong current_pc = 0;
1052
    target_ulong current_cs_base = 0;
1053
    int current_flags = 0;
1054
#endif
1055

    
1056
    addr &= TARGET_PAGE_MASK;
1057
    p = page_find(addr >> TARGET_PAGE_BITS);
1058
    if (!p)
1059
        return;
1060
    tb = p->first_tb;
1061
#ifdef TARGET_HAS_PRECISE_SMC
1062
    if (tb && pc != 0) {
1063
        current_tb = tb_find_pc(pc);
1064
    }
1065
#endif
1066
    while (tb != NULL) {
1067
        n = (long)tb & 3;
1068
        tb = (TranslationBlock *)((long)tb & ~3);
1069
#ifdef TARGET_HAS_PRECISE_SMC
1070
        if (current_tb == tb &&
1071
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1072
                /* If we are modifying the current TB, we must stop
1073
                   its execution. We could be more precise by checking
1074
                   that the modification is after the current PC, but it
1075
                   would require a specialized function to partially
1076
                   restore the CPU state */
1077

    
1078
            current_tb_modified = 1;
1079
            cpu_restore_state(current_tb, env, pc, puc);
1080
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1081
                                 &current_flags);
1082
        }
1083
#endif /* TARGET_HAS_PRECISE_SMC */
1084
        tb_phys_invalidate(tb, addr);
1085
        tb = tb->page_next[n];
1086
    }
1087
    p->first_tb = NULL;
1088
#ifdef TARGET_HAS_PRECISE_SMC
1089
    if (current_tb_modified) {
1090
        /* we generate a block containing just the instruction
1091
           modifying the memory. It will ensure that it cannot modify
1092
           itself */
1093
        env->current_tb = NULL;
1094
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1095
        cpu_resume_from_signal(env, puc);
1096
    }
1097
#endif
1098
}
1099
#endif
1100

    
1101
/* add the tb in the target page and protect it if necessary */
1102
static inline void tb_alloc_page(TranslationBlock *tb,
1103
                                 unsigned int n, target_ulong page_addr)
1104
{
1105
    PageDesc *p;
1106
    TranslationBlock *last_first_tb;
1107

    
1108
    tb->page_addr[n] = page_addr;
1109
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1110
    tb->page_next[n] = p->first_tb;
1111
    last_first_tb = p->first_tb;
1112
    p->first_tb = (TranslationBlock *)((long)tb | n);
1113
    invalidate_page_bitmap(p);
1114

    
1115
#if defined(TARGET_HAS_SMC) || 1
1116

    
1117
#if defined(CONFIG_USER_ONLY)
1118
    if (p->flags & PAGE_WRITE) {
1119
        target_ulong addr;
1120
        PageDesc *p2;
1121
        int prot;
1122

    
1123
        /* force the host page as non writable (writes will have a
1124
           page fault + mprotect overhead) */
1125
        page_addr &= qemu_host_page_mask;
1126
        prot = 0;
1127
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1128
            addr += TARGET_PAGE_SIZE) {
1129

    
1130
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1131
            if (!p2)
1132
                continue;
1133
            prot |= p2->flags;
1134
            p2->flags &= ~PAGE_WRITE;
1135
            page_get_flags(addr);
1136
          }
1137
        mprotect(g2h(page_addr), qemu_host_page_size,
1138
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1139
#ifdef DEBUG_TB_INVALIDATE
1140
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1141
               page_addr);
1142
#endif
1143
    }
1144
#else
1145
    /* if some code is already present, then the pages are already
1146
       protected. So we handle the case where only the first TB is
1147
       allocated in a physical page */
1148
    if (!last_first_tb) {
1149
        tlb_protect_code(page_addr);
1150
    }
1151
#endif
1152

    
1153
#endif /* TARGET_HAS_SMC */
1154
}
1155

    
1156
/* Allocate a new translation block. Flush the translation buffer if
1157
   too many translation blocks or too much generated code. */
1158
TranslationBlock *tb_alloc(target_ulong pc)
1159
{
1160
    TranslationBlock *tb;
1161

    
1162
    if (nb_tbs >= code_gen_max_blocks ||
1163
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1164
        return NULL;
1165
    tb = &tbs[nb_tbs++];
1166
    tb->pc = pc;
1167
    tb->cflags = 0;
1168
    return tb;
1169
}
1170

    
1171
void tb_free(TranslationBlock *tb)
1172
{
1173
    /* In practice this is mostly used for single use temporary TB
1174
       Ignore the hard cases and just back up if this TB happens to
1175
       be the last one generated.  */
1176
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1177
        code_gen_ptr = tb->tc_ptr;
1178
        nb_tbs--;
1179
    }
1180
}
1181

    
1182
/* add a new TB and link it to the physical page tables. phys_page2 is
1183
   (-1) to indicate that only one page contains the TB. */
1184
void tb_link_phys(TranslationBlock *tb,
1185
                  target_ulong phys_pc, target_ulong phys_page2)
1186
{
1187
    unsigned int h;
1188
    TranslationBlock **ptb;
1189

    
1190
    /* Grab the mmap lock to stop another thread invalidating this TB
1191
       before we are done.  */
1192
    mmap_lock();
1193
    /* add in the physical hash table */
1194
    h = tb_phys_hash_func(phys_pc);
1195
    ptb = &tb_phys_hash[h];
1196
    tb->phys_hash_next = *ptb;
1197
    *ptb = tb;
1198

    
1199
    /* add in the page list */
1200
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1201
    if (phys_page2 != -1)
1202
        tb_alloc_page(tb, 1, phys_page2);
1203
    else
1204
        tb->page_addr[1] = -1;
1205

    
1206
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1207
    tb->jmp_next[0] = NULL;
1208
    tb->jmp_next[1] = NULL;
1209

    
1210
    /* init original jump addresses */
1211
    if (tb->tb_next_offset[0] != 0xffff)
1212
        tb_reset_jump(tb, 0);
1213
    if (tb->tb_next_offset[1] != 0xffff)
1214
        tb_reset_jump(tb, 1);
1215

    
1216
#ifdef DEBUG_TB_CHECK
1217
    tb_page_check();
1218
#endif
1219
    mmap_unlock();
1220
}
1221

    
1222
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1223
   tb[1].tc_ptr. Return NULL if not found */
1224
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1225
{
1226
    int m_min, m_max, m;
1227
    unsigned long v;
1228
    TranslationBlock *tb;
1229

    
1230
    if (nb_tbs <= 0)
1231
        return NULL;
1232
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1233
        tc_ptr >= (unsigned long)code_gen_ptr)
1234
        return NULL;
1235
    /* binary search (cf Knuth) */
1236
    m_min = 0;
1237
    m_max = nb_tbs - 1;
1238
    while (m_min <= m_max) {
1239
        m = (m_min + m_max) >> 1;
1240
        tb = &tbs[m];
1241
        v = (unsigned long)tb->tc_ptr;
1242
        if (v == tc_ptr)
1243
            return tb;
1244
        else if (tc_ptr < v) {
1245
            m_max = m - 1;
1246
        } else {
1247
            m_min = m + 1;
1248
        }
1249
    }
1250
    return &tbs[m_max];
1251
}
1252

    
1253
static void tb_reset_jump_recursive(TranslationBlock *tb);
1254

    
1255
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1256
{
1257
    TranslationBlock *tb1, *tb_next, **ptb;
1258
    unsigned int n1;
1259

    
1260
    tb1 = tb->jmp_next[n];
1261
    if (tb1 != NULL) {
1262
        /* find head of list */
1263
        for(;;) {
1264
            n1 = (long)tb1 & 3;
1265
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1266
            if (n1 == 2)
1267
                break;
1268
            tb1 = tb1->jmp_next[n1];
1269
        }
1270
        /* we are now sure now that tb jumps to tb1 */
1271
        tb_next = tb1;
1272

    
1273
        /* remove tb from the jmp_first list */
1274
        ptb = &tb_next->jmp_first;
1275
        for(;;) {
1276
            tb1 = *ptb;
1277
            n1 = (long)tb1 & 3;
1278
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1279
            if (n1 == n && tb1 == tb)
1280
                break;
1281
            ptb = &tb1->jmp_next[n1];
1282
        }
1283
        *ptb = tb->jmp_next[n];
1284
        tb->jmp_next[n] = NULL;
1285

    
1286
        /* suppress the jump to next tb in generated code */
1287
        tb_reset_jump(tb, n);
1288

    
1289
        /* suppress jumps in the tb on which we could have jumped */
1290
        tb_reset_jump_recursive(tb_next);
1291
    }
1292
}
1293

    
1294
static void tb_reset_jump_recursive(TranslationBlock *tb)
1295
{
1296
    tb_reset_jump_recursive2(tb, 0);
1297
    tb_reset_jump_recursive2(tb, 1);
1298
}
1299

    
1300
#if defined(TARGET_HAS_ICE)
1301
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1302
{
1303
    target_phys_addr_t addr;
1304
    target_ulong pd;
1305
    ram_addr_t ram_addr;
1306
    PhysPageDesc *p;
1307

    
1308
    addr = cpu_get_phys_page_debug(env, pc);
1309
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1310
    if (!p) {
1311
        pd = IO_MEM_UNASSIGNED;
1312
    } else {
1313
        pd = p->phys_offset;
1314
    }
1315
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1316
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1317
}
1318
#endif
1319

    
1320
/* Add a watchpoint.  */
1321
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1322
                          int flags, CPUWatchpoint **watchpoint)
1323
{
1324
    target_ulong len_mask = ~(len - 1);
1325
    CPUWatchpoint *wp;
1326

    
1327
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1328
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1329
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1330
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1331
        return -EINVAL;
1332
    }
1333
    wp = qemu_malloc(sizeof(*wp));
1334

    
1335
    wp->vaddr = addr;
1336
    wp->len_mask = len_mask;
1337
    wp->flags = flags;
1338

    
1339
    /* keep all GDB-injected watchpoints in front */
1340
    if (flags & BP_GDB)
1341
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1342
    else
1343
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1344

    
1345
    tlb_flush_page(env, addr);
1346

    
1347
    if (watchpoint)
1348
        *watchpoint = wp;
1349
    return 0;
1350
}
1351

    
1352
/* Remove a specific watchpoint.  */
1353
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1354
                          int flags)
1355
{
1356
    target_ulong len_mask = ~(len - 1);
1357
    CPUWatchpoint *wp;
1358

    
1359
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1360
        if (addr == wp->vaddr && len_mask == wp->len_mask
1361
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1362
            cpu_watchpoint_remove_by_ref(env, wp);
1363
            return 0;
1364
        }
1365
    }
1366
    return -ENOENT;
1367
}
1368

    
1369
/* Remove a specific watchpoint by reference.  */
1370
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1371
{
1372
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1373

    
1374
    tlb_flush_page(env, watchpoint->vaddr);
1375

    
1376
    qemu_free(watchpoint);
1377
}
1378

    
1379
/* Remove all matching watchpoints.  */
1380
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1381
{
1382
    CPUWatchpoint *wp, *next;
1383

    
1384
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1385
        if (wp->flags & mask)
1386
            cpu_watchpoint_remove_by_ref(env, wp);
1387
    }
1388
}
1389

    
1390
/* Add a breakpoint.  */
1391
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1392
                          CPUBreakpoint **breakpoint)
1393
{
1394
#if defined(TARGET_HAS_ICE)
1395
    CPUBreakpoint *bp;
1396

    
1397
    bp = qemu_malloc(sizeof(*bp));
1398

    
1399
    bp->pc = pc;
1400
    bp->flags = flags;
1401

    
1402
    /* keep all GDB-injected breakpoints in front */
1403
    if (flags & BP_GDB)
1404
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1405
    else
1406
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1407

    
1408
    breakpoint_invalidate(env, pc);
1409

    
1410
    if (breakpoint)
1411
        *breakpoint = bp;
1412
    return 0;
1413
#else
1414
    return -ENOSYS;
1415
#endif
1416
}
1417

    
1418
/* Remove a specific breakpoint.  */
1419
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1420
{
1421
#if defined(TARGET_HAS_ICE)
1422
    CPUBreakpoint *bp;
1423

    
1424
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1425
        if (bp->pc == pc && bp->flags == flags) {
1426
            cpu_breakpoint_remove_by_ref(env, bp);
1427
            return 0;
1428
        }
1429
    }
1430
    return -ENOENT;
1431
#else
1432
    return -ENOSYS;
1433
#endif
1434
}
1435

    
1436
/* Remove a specific breakpoint by reference.  */
1437
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1438
{
1439
#if defined(TARGET_HAS_ICE)
1440
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1441

    
1442
    breakpoint_invalidate(env, breakpoint->pc);
1443

    
1444
    qemu_free(breakpoint);
1445
#endif
1446
}
1447

    
1448
/* Remove all matching breakpoints. */
1449
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1450
{
1451
#if defined(TARGET_HAS_ICE)
1452
    CPUBreakpoint *bp, *next;
1453

    
1454
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1455
        if (bp->flags & mask)
1456
            cpu_breakpoint_remove_by_ref(env, bp);
1457
    }
1458
#endif
1459
}
1460

    
1461
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1462
   CPU loop after each instruction */
1463
void cpu_single_step(CPUState *env, int enabled)
1464
{
1465
#if defined(TARGET_HAS_ICE)
1466
    if (env->singlestep_enabled != enabled) {
1467
        env->singlestep_enabled = enabled;
1468
        if (kvm_enabled())
1469
            kvm_update_guest_debug(env, 0);
1470
        else {
1471
            /* must flush all the translated code to avoid inconsistencies */
1472
            /* XXX: only flush what is necessary */
1473
            tb_flush(env);
1474
        }
1475
    }
1476
#endif
1477
}
1478

    
1479
/* enable or disable low levels log */
1480
void cpu_set_log(int log_flags)
1481
{
1482
    loglevel = log_flags;
1483
    if (loglevel && !logfile) {
1484
        logfile = fopen(logfilename, log_append ? "a" : "w");
1485
        if (!logfile) {
1486
            perror(logfilename);
1487
            _exit(1);
1488
        }
1489
#if !defined(CONFIG_SOFTMMU)
1490
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1491
        {
1492
            static char logfile_buf[4096];
1493
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1494
        }
1495
#elif !defined(_WIN32)
1496
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1497
        setvbuf(logfile, NULL, _IOLBF, 0);
1498
#endif
1499
        log_append = 1;
1500
    }
1501
    if (!loglevel && logfile) {
1502
        fclose(logfile);
1503
        logfile = NULL;
1504
    }
1505
}
1506

    
1507
void cpu_set_log_filename(const char *filename)
1508
{
1509
    logfilename = strdup(filename);
1510
    if (logfile) {
1511
        fclose(logfile);
1512
        logfile = NULL;
1513
    }
1514
    cpu_set_log(loglevel);
1515
}
1516

    
1517
static void cpu_unlink_tb(CPUState *env)
1518
{
1519
#if defined(CONFIG_USE_NPTL)
1520
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1521
       problem and hope the cpu will stop of its own accord.  For userspace
1522
       emulation this often isn't actually as bad as it sounds.  Often
1523
       signals are used primarily to interrupt blocking syscalls.  */
1524
#else
1525
    TranslationBlock *tb;
1526
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1527

    
1528
    tb = env->current_tb;
1529
    /* if the cpu is currently executing code, we must unlink it and
1530
       all the potentially executing TB */
1531
    if (tb && !testandset(&interrupt_lock)) {
1532
        env->current_tb = NULL;
1533
        tb_reset_jump_recursive(tb);
1534
        resetlock(&interrupt_lock);
1535
    }
1536
#endif
1537
}
1538

    
1539
/* mask must never be zero, except for A20 change call */
1540
void cpu_interrupt(CPUState *env, int mask)
1541
{
1542
    int old_mask;
1543

    
1544
    old_mask = env->interrupt_request;
1545
    env->interrupt_request |= mask;
1546

    
1547
#ifndef CONFIG_USER_ONLY
1548
    /*
1549
     * If called from iothread context, wake the target cpu in
1550
     * case its halted.
1551
     */
1552
    if (!qemu_cpu_self(env)) {
1553
        qemu_cpu_kick(env);
1554
        return;
1555
    }
1556
#endif
1557

    
1558
    if (use_icount) {
1559
        env->icount_decr.u16.high = 0xffff;
1560
#ifndef CONFIG_USER_ONLY
1561
        if (!can_do_io(env)
1562
            && (mask & ~old_mask) != 0) {
1563
            cpu_abort(env, "Raised interrupt while not in I/O function");
1564
        }
1565
#endif
1566
    } else {
1567
        cpu_unlink_tb(env);
1568
    }
1569
}
1570

    
1571
void cpu_reset_interrupt(CPUState *env, int mask)
1572
{
1573
    env->interrupt_request &= ~mask;
1574
}
1575

    
1576
void cpu_exit(CPUState *env)
1577
{
1578
    env->exit_request = 1;
1579
    cpu_unlink_tb(env);
1580
}
1581

    
1582
const CPULogItem cpu_log_items[] = {
1583
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1584
      "show generated host assembly code for each compiled TB" },
1585
    { CPU_LOG_TB_IN_ASM, "in_asm",
1586
      "show target assembly code for each compiled TB" },
1587
    { CPU_LOG_TB_OP, "op",
1588
      "show micro ops for each compiled TB" },
1589
    { CPU_LOG_TB_OP_OPT, "op_opt",
1590
      "show micro ops "
1591
#ifdef TARGET_I386
1592
      "before eflags optimization and "
1593
#endif
1594
      "after liveness analysis" },
1595
    { CPU_LOG_INT, "int",
1596
      "show interrupts/exceptions in short format" },
1597
    { CPU_LOG_EXEC, "exec",
1598
      "show trace before each executed TB (lots of logs)" },
1599
    { CPU_LOG_TB_CPU, "cpu",
1600
      "show CPU state before block translation" },
1601
#ifdef TARGET_I386
1602
    { CPU_LOG_PCALL, "pcall",
1603
      "show protected mode far calls/returns/exceptions" },
1604
    { CPU_LOG_RESET, "cpu_reset",
1605
      "show CPU state before CPU resets" },
1606
#endif
1607
#ifdef DEBUG_IOPORT
1608
    { CPU_LOG_IOPORT, "ioport",
1609
      "show all i/o ports accesses" },
1610
#endif
1611
    { 0, NULL, NULL },
1612
};
1613

    
1614
static int cmp1(const char *s1, int n, const char *s2)
1615
{
1616
    if (strlen(s2) != n)
1617
        return 0;
1618
    return memcmp(s1, s2, n) == 0;
1619
}
1620

    
1621
/* takes a comma separated list of log masks. Return 0 if error. */
1622
int cpu_str_to_log_mask(const char *str)
1623
{
1624
    const CPULogItem *item;
1625
    int mask;
1626
    const char *p, *p1;
1627

    
1628
    p = str;
1629
    mask = 0;
1630
    for(;;) {
1631
        p1 = strchr(p, ',');
1632
        if (!p1)
1633
            p1 = p + strlen(p);
1634
        if(cmp1(p,p1-p,"all")) {
1635
                for(item = cpu_log_items; item->mask != 0; item++) {
1636
                        mask |= item->mask;
1637
                }
1638
        } else {
1639
        for(item = cpu_log_items; item->mask != 0; item++) {
1640
            if (cmp1(p, p1 - p, item->name))
1641
                goto found;
1642
        }
1643
        return 0;
1644
        }
1645
    found:
1646
        mask |= item->mask;
1647
        if (*p1 != ',')
1648
            break;
1649
        p = p1 + 1;
1650
    }
1651
    return mask;
1652
}
1653

    
1654
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    /* The memcpy above copied env's list heads into new_env; reset the
       copy's lists before inserting the cloned entries below. */
    TAILQ_INIT(&new_env->breakpoints);
    TAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

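/* Illustrative sketch (an assumption about the user-mode emulation code):
 * when a guest fork()/clone() is emulated, the child's CPU state is derived
 * from the parent with cpu_copy(), and the caller then patches the
 * child-specific registers (stack pointer, syscall return value, TLS):
 *
 *     CPUState *child_env = cpu_copy(parent_env);
 *     // caller-specific fix-ups of child_env follow here
 */
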
#if !defined(CONFIG_USER_ONLY)
1721

    
1722
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1723
{
1724
    unsigned int i;
1725

    
1726
    /* Discard jump cache entries for any tb which might potentially
1727
       overlap the flushed page.  */
1728
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1729
    memset (&env->tb_jmp_cache[i], 0, 
1730
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1731

    
1732
    i = tb_jmp_cache_hash_page(addr);
1733
    memset (&env->tb_jmp_cache[i], 0, 
1734
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1735
}
1736

    
1737
static CPUTLBEntry s_cputlb_empty_entry = {
1738
    .addr_read  = -1,
1739
    .addr_write = -1,
1740
    .addr_code  = -1,
1741
    .addend     = -1,
1742
};
1743

    
1744
/* NOTE: if flush_global is true, also flush global entries (not
1745
   implemented yet) */
1746
void tlb_flush(CPUState *env, int flush_global)
1747
{
1748
    int i;
1749

    
1750
#if defined(DEBUG_TLB)
1751
    printf("tlb_flush:\n");
1752
#endif
1753
    /* must reset current TB so that interrupts cannot modify the
1754
       links while we are modifying them */
1755
    env->current_tb = NULL;
1756

    
1757
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1758
        int mmu_idx;
1759
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1760
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1761
        }
1762
    }
1763

    
1764
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1765

    
1766
#ifdef CONFIG_KQEMU
1767
    if (env->kqemu_enabled) {
1768
        kqemu_flush(env, flush_global);
1769
    }
1770
#endif
1771
    tlb_flush_count++;
1772
}
1773

    
1774
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1775
{
1776
    if (addr == (tlb_entry->addr_read &
1777
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1778
        addr == (tlb_entry->addr_write &
1779
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1780
        addr == (tlb_entry->addr_code &
1781
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1782
        *tlb_entry = s_cputlb_empty_entry;
1783
    }
1784
}
1785

    
1786
void tlb_flush_page(CPUState *env, target_ulong addr)
1787
{
1788
    int i;
1789
    int mmu_idx;
1790

    
1791
#if defined(DEBUG_TLB)
1792
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1793
#endif
1794
    /* must reset current TB so that interrupts cannot modify the
1795
       links while we are modifying them */
1796
    env->current_tb = NULL;
1797

    
1798
    addr &= TARGET_PAGE_MASK;
1799
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1800
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1801
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1802

    
1803
    tlb_flush_jmp_cache(env, addr);
1804

    
1805
#ifdef CONFIG_KQEMU
1806
    if (env->kqemu_enabled) {
1807
        kqemu_flush_page(env, addr);
1808
    }
1809
#endif
1810
}
1811

    
1812
/* update the TLBs so that writes to code in the virtual page 'addr'
1813
   can be detected */
1814
static void tlb_protect_code(ram_addr_t ram_addr)
1815
{
1816
    cpu_physical_memory_reset_dirty(ram_addr,
1817
                                    ram_addr + TARGET_PAGE_SIZE,
1818
                                    CODE_DIRTY_FLAG);
1819
}
1820

    
1821
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1822
   tested for self modifying code */
1823
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1824
                                    target_ulong vaddr)
1825
{
1826
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1827
}
1828

    
1829
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1830
                                         unsigned long start, unsigned long length)
1831
{
1832
    unsigned long addr;
1833
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1834
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1835
        if ((addr - start) < length) {
1836
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1837
        }
1838
    }
1839
}
1840

    
1841
/* Note: start and end must be within the same ram block.  */
1842
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1843
                                     int dirty_flags)
1844
{
1845
    CPUState *env;
1846
    unsigned long length, start1;
1847
    int i, mask, len;
1848
    uint8_t *p;
1849

    
1850
    start &= TARGET_PAGE_MASK;
1851
    end = TARGET_PAGE_ALIGN(end);
1852

    
1853
    length = end - start;
1854
    if (length == 0)
1855
        return;
1856
    len = length >> TARGET_PAGE_BITS;
1857
#ifdef CONFIG_KQEMU
1858
    /* XXX: should not depend on cpu context */
1859
    env = first_cpu;
1860
    if (env->kqemu_enabled) {
1861
        ram_addr_t addr;
1862
        addr = start;
1863
        for(i = 0; i < len; i++) {
1864
            kqemu_set_notdirty(env, addr);
1865
            addr += TARGET_PAGE_SIZE;
1866
        }
1867
    }
1868
#endif
1869
    mask = ~dirty_flags;
1870
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1871
    for(i = 0; i < len; i++)
1872
        p[i] &= mask;
1873

    
1874
    /* we modify the TLB cache so that the dirty bit will be set again
1875
       when accessing the range */
1876
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1877
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1879
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1880
            != (end - 1) - start) {
1881
        abort();
1882
    }
1883

    
1884
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1885
        int mmu_idx;
1886
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1887
            for(i = 0; i < CPU_TLB_SIZE; i++)
1888
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1889
                                      start1, length);
1890
        }
1891
    }
1892
}
1893
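
/* Illustrative sketch (an assumption about callers such as display
 * adapters): a framebuffer device scans its RAM for pages whose VGA dirty
 * bit is set, redraws them, then clears only that bit so later guest writes
 * mark the page dirty again.  The dirty-flag helpers are assumed to come
 * from cpu-all.h; update_display_page() is hypothetical.
 *
 *     for (addr = fb_start; addr < fb_end; addr += TARGET_PAGE_SIZE) {
 *         if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG))
 *             update_display_page(addr);
 *     }
 *     cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
 */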

    
1894
int cpu_physical_memory_set_dirty_tracking(int enable)
1895
{
1896
    in_migration = enable;
1897
    if (kvm_enabled()) {
1898
        return kvm_set_migration_log(enable);
1899
    }
1900
    return 0;
1901
}
1902

    
1903
int cpu_physical_memory_get_dirty_tracking(void)
1904
{
1905
    return in_migration;
1906
}
1907

    
1908
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1909
                                   target_phys_addr_t end_addr)
1910
{
1911
    int ret = 0;
1912

    
1913
    if (kvm_enabled())
1914
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1915
    return ret;
1916
}
1917

    
1918
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1919
{
1920
    ram_addr_t ram_addr;
1921
    void *p;
1922

    
1923
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1924
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1925
            + tlb_entry->addend);
1926
        ram_addr = qemu_ram_addr_from_host(p);
1927
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1928
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1929
        }
1930
    }
1931
}
1932

    
1933
/* update the TLB according to the current state of the dirty bits */
1934
void cpu_tlb_update_dirty(CPUState *env)
1935
{
1936
    int i;
1937
    int mmu_idx;
1938
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1939
        for(i = 0; i < CPU_TLB_SIZE; i++)
1940
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1941
    }
1942
}
1943

    
1944
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1945
{
1946
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1947
        tlb_entry->addr_write = vaddr;
1948
}
1949

    
1950
/* update the TLB corresponding to virtual page vaddr
1951
   so that it is no longer dirty */
1952
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1953
{
1954
    int i;
1955
    int mmu_idx;
1956

    
1957
    vaddr &= TARGET_PAGE_MASK;
1958
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1959
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1960
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1961
}
1962

    
1963
/* add a new TLB entry. At most one entry for a given virtual address
1964
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1965
   (can only happen in non SOFTMMU mode for I/O pages or pages
1966
   conflicting with the host address space). */
1967
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1968
                      target_phys_addr_t paddr, int prot,
1969
                      int mmu_idx, int is_softmmu)
1970
{
1971
    PhysPageDesc *p;
1972
    unsigned long pd;
1973
    unsigned int index;
1974
    target_ulong address;
1975
    target_ulong code_address;
1976
    target_phys_addr_t addend;
1977
    int ret;
1978
    CPUTLBEntry *te;
1979
    CPUWatchpoint *wp;
1980
    target_phys_addr_t iotlb;
1981

    
1982
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1983
    if (!p) {
1984
        pd = IO_MEM_UNASSIGNED;
1985
    } else {
1986
        pd = p->phys_offset;
1987
    }
1988
#if defined(DEBUG_TLB)
1989
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1990
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1991
#endif
1992

    
1993
    ret = 0;
1994
    address = vaddr;
1995
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1996
        /* IO memory case (romd handled later) */
1997
        address |= TLB_MMIO;
1998
    }
1999
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2000
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2001
        /* Normal RAM.  */
2002
        iotlb = pd & TARGET_PAGE_MASK;
2003
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2004
            iotlb |= IO_MEM_NOTDIRTY;
2005
        else
2006
            iotlb |= IO_MEM_ROM;
2007
    } else {
2008
        /* IO handlers are currently passed a physical address.
2009
           It would be nice to pass an offset from the base address
2010
           of that region.  This would avoid having to special case RAM,
2011
           and avoid full address decoding in every device.
2012
           We can't use the high bits of pd for this because
2013
           IO_MEM_ROMD uses these as a ram address.  */
2014
        iotlb = (pd & ~TARGET_PAGE_MASK);
2015
        if (p) {
2016
            iotlb += p->region_offset;
2017
        } else {
2018
            iotlb += paddr;
2019
        }
2020
    }
2021

    
2022
    code_address = address;
2023
    /* Make accesses to pages with watchpoints go via the
2024
       watchpoint trap routines.  */
2025
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2026
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2027
            iotlb = io_mem_watch + paddr;
2028
            /* TODO: The memory case can be optimized by not trapping
2029
               reads of pages with a write breakpoint.  */
2030
            address |= TLB_MMIO;
2031
        }
2032
    }
2033

    
2034
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2035
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2036
    te = &env->tlb_table[mmu_idx][index];
2037
    te->addend = addend - vaddr;
2038
    if (prot & PAGE_READ) {
2039
        te->addr_read = address;
2040
    } else {
2041
        te->addr_read = -1;
2042
    }
2043

    
2044
    if (prot & PAGE_EXEC) {
2045
        te->addr_code = code_address;
2046
    } else {
2047
        te->addr_code = -1;
2048
    }
2049
    if (prot & PAGE_WRITE) {
2050
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2051
            (pd & IO_MEM_ROMD)) {
2052
            /* Write access calls the I/O callback.  */
2053
            te->addr_write = address | TLB_MMIO;
2054
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2055
                   !cpu_physical_memory_is_dirty(pd)) {
2056
            te->addr_write = address | TLB_NOTDIRTY;
2057
        } else {
2058
            te->addr_write = address;
2059
        }
2060
    } else {
2061
        te->addr_write = -1;
2062
    }
2063
    return ret;
2064
}
2065
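
/* Illustrative sketch (an assumption about target code): after a target's
 * MMU fault handler has translated vaddr to paddr and computed the page
 * protection, it fills the softmmu TLB with a call like the one below
 * (targets are assumed to usually go through a thin tlb_set_page() wrapper):
 *
 *     ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
 *                             paddr & TARGET_PAGE_MASK,
 *                             prot, mmu_idx, is_softmmu);
 */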

    
2066
#else
2067

    
2068
void tlb_flush(CPUState *env, int flush_global)
2069
{
2070
}
2071

    
2072
void tlb_flush_page(CPUState *env, target_ulong addr)
2073
{
2074
}
2075

    
2076
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2077
                      target_phys_addr_t paddr, int prot,
2078
                      int mmu_idx, int is_softmmu)
2079
{
2080
    return 0;
2081
}
2082

    
2083
/*
2084
 * Walks guest process memory "regions" one by one
2085
 * and calls callback function 'fn' for each region.
2086
 */
2087
int walk_memory_regions(void *priv,
2088
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2089
{
2090
    unsigned long start, end;
2091
    PageDesc *p = NULL;
2092
    int i, j, prot, prot1;
2093
    int rc = 0;
2094

    
2095
    start = end = -1;
2096
    prot = 0;
2097

    
2098
    for (i = 0; i <= L1_SIZE; i++) {
2099
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2100
        for (j = 0; j < L2_SIZE; j++) {
2101
            prot1 = (p == NULL) ? 0 : p[j].flags;
2102
            /*
2103
             * "region" is one continuous chunk of memory
2104
             * that has the same protection flags set.
2105
             */
2106
            if (prot1 != prot) {
2107
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2108
                if (start != -1) {
2109
                    rc = (*fn)(priv, start, end, prot);
2110
                    /* callback can stop iteration by returning != 0 */
2111
                    if (rc != 0)
2112
                        return (rc);
2113
                }
2114
                if (prot1 != 0)
2115
                    start = end;
2116
                else
2117
                    start = -1;
2118
                prot = prot1;
2119
            }
2120
            if (p == NULL)
2121
                break;
2122
        }
2123
    }
2124
    return (rc);
2125
}
2126

    
2127
static int dump_region(void *priv, unsigned long start,
2128
    unsigned long end, unsigned long prot)
2129
{
2130
    FILE *f = (FILE *)priv;
2131

    
2132
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2133
        start, end, end - start,
2134
        ((prot & PAGE_READ) ? 'r' : '-'),
2135
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2136
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2137

    
2138
    return (0);
2139
}
2140

    
2141
/* dump memory mappings */
2142
void page_dump(FILE *f)
2143
{
2144
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2145
            "start", "end", "size", "prot");
2146
    walk_memory_regions(f, dump_region);
2147
}
2148

    
2149
int page_get_flags(target_ulong address)
2150
{
2151
    PageDesc *p;
2152

    
2153
    p = page_find(address >> TARGET_PAGE_BITS);
2154
    if (!p)
2155
        return 0;
2156
    return p->flags;
2157
}
2158

    
2159
/* modify the flags of a page and invalidate the code if
2160
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2161
   depending on PAGE_WRITE */
2162
void page_set_flags(target_ulong start, target_ulong end, int flags)
2163
{
2164
    PageDesc *p;
2165
    target_ulong addr;
2166

    
2167
    /* mmap_lock should already be held.  */
2168
    start = start & TARGET_PAGE_MASK;
2169
    end = TARGET_PAGE_ALIGN(end);
2170
    if (flags & PAGE_WRITE)
2171
        flags |= PAGE_WRITE_ORG;
2172
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2173
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2174
        /* We may be called for host regions that are outside guest
2175
           address space.  */
2176
        if (!p)
2177
            return;
2178
        /* if the write protection is set, then we invalidate the code
2179
           inside */
2180
        if (!(p->flags & PAGE_WRITE) &&
2181
            (flags & PAGE_WRITE) &&
2182
            p->first_tb) {
2183
            tb_invalidate_phys_page(addr, 0, NULL);
2184
        }
2185
        p->flags = flags;
2186
    }
2187
}
2188

    
2189
int page_check_range(target_ulong start, target_ulong len, int flags)
2190
{
2191
    PageDesc *p;
2192
    target_ulong end;
2193
    target_ulong addr;
2194

    
2195
    if (start + len < start)
2196
        /* we've wrapped around */
2197
        return -1;
2198

    
2199
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2200
    start = start & TARGET_PAGE_MASK;
2201

    
2202
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2203
        p = page_find(addr >> TARGET_PAGE_BITS);
2204
        if( !p )
2205
            return -1;
2206
        if( !(p->flags & PAGE_VALID) )
2207
            return -1;
2208

    
2209
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2210
            return -1;
2211
        if (flags & PAGE_WRITE) {
2212
            if (!(p->flags & PAGE_WRITE_ORG))
2213
                return -1;
2214
            /* unprotect the page if it was put read-only because it
2215
               contains translated code */
2216
            if (!(p->flags & PAGE_WRITE)) {
2217
                if (!page_unprotect(addr, 0, NULL))
2218
                    return -1;
2219
            }
2220
            return 0;
2221
        }
2222
    }
2223
    return 0;
2224
}
2225
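
/* Illustrative sketch (an assumption about the linux-user syscall layer):
 * guest buffers are validated before the host touches them, e.g.
 *
 *     if (page_check_range(guest_addr, len, PAGE_READ) < 0) {
 *         return -EFAULT;   // or the target's equivalent error code
 *     }
 */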

    
2226
/* called from signal handler: invalidate the code and unprotect the
2227
   page. Return TRUE if the fault was successfully handled. */
2228
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2229
{
2230
    unsigned int page_index, prot, pindex;
2231
    PageDesc *p, *p1;
2232
    target_ulong host_start, host_end, addr;
2233

    
2234
    /* Technically this isn't safe inside a signal handler.  However we
2235
       know this only ever happens in a synchronous SEGV handler, so in
2236
       practice it seems to be ok.  */
2237
    mmap_lock();
2238

    
2239
    host_start = address & qemu_host_page_mask;
2240
    page_index = host_start >> TARGET_PAGE_BITS;
2241
    p1 = page_find(page_index);
2242
    if (!p1) {
2243
        mmap_unlock();
2244
        return 0;
2245
    }
2246
    host_end = host_start + qemu_host_page_size;
2247
    p = p1;
2248
    prot = 0;
2249
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2250
        prot |= p->flags;
2251
        p++;
2252
    }
2253
    /* if the page was really writable, then we change its
2254
       protection back to writable */
2255
    if (prot & PAGE_WRITE_ORG) {
2256
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2257
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2258
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2259
                     (prot & PAGE_BITS) | PAGE_WRITE);
2260
            p1[pindex].flags |= PAGE_WRITE;
2261
            /* and since the content will be modified, we must invalidate
2262
               the corresponding translated code. */
2263
            tb_invalidate_phys_page(address, pc, puc);
2264
#ifdef DEBUG_TB_CHECK
2265
            tb_invalidate_check(address);
2266
#endif
2267
            mmap_unlock();
2268
            return 1;
2269
        }
2270
    }
2271
    mmap_unlock();
2272
    return 0;
2273
}
2274

    
2275
static inline void tlb_set_dirty(CPUState *env,
2276
                                 unsigned long addr, target_ulong vaddr)
2277
{
2278
}
2279
#endif /* defined(CONFIG_USER_ONLY) */
2280

    
2281
#if !defined(CONFIG_USER_ONLY)
2282

    
2283
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2284
                             ram_addr_t memory, ram_addr_t region_offset);
2285
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2286
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2287
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
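
/* Worked example (illustrative, assuming TARGET_PAGE_SIZE == 0x1000): for a
 * registration with start_addr = 0x10000800 and orig_size = 0x400, the loop
 * below visits the page addr = 0x10000000.  Since addr is not greater than
 * start_addr, start_addr2 = 0x10000800 & 0xfff = 0x800 (> 0, so a subpage is
 * needed); and since (start_addr + orig_size) - addr = 0xc00 is less than a
 * page, end_addr2 = 0x10000bff & 0xfff = 0xbff.  The page therefore gets a
 * subpage covering only offsets 0x800..0xbff.
 */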

    
2307
/* register physical memory. 'size' must be a multiple of the target
2308
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2309
   io memory page.  The address used when calling the IO function is
2310
   the offset from the start of the region, plus region_offset.  Both
2311
   start_addr and region_offset are rounded down to a page boundary
2312
   before calculating this offset.  This should not be a problem unless
2313
   the low bits of start_addr and region_offset differ.  */
2314
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2315
                                         ram_addr_t size,
2316
                                         ram_addr_t phys_offset,
2317
                                         ram_addr_t region_offset)
2318
{
2319
    target_phys_addr_t addr, end_addr;
2320
    PhysPageDesc *p;
2321
    CPUState *env;
2322
    ram_addr_t orig_size = size;
2323
    void *subpage;
2324

    
2325
#ifdef CONFIG_KQEMU
2326
    /* XXX: should not depend on cpu context */
2327
    env = first_cpu;
2328
    if (env->kqemu_enabled) {
2329
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2330
    }
2331
#endif
2332
    if (kvm_enabled())
2333
        kvm_set_phys_mem(start_addr, size, phys_offset);
2334

    
2335
    if (phys_offset == IO_MEM_UNASSIGNED) {
2336
        region_offset = start_addr;
2337
    }
2338
    region_offset &= TARGET_PAGE_MASK;
2339
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2340
    end_addr = start_addr + (target_phys_addr_t)size;
2341
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2342
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2343
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2344
            ram_addr_t orig_memory = p->phys_offset;
2345
            target_phys_addr_t start_addr2, end_addr2;
2346
            int need_subpage = 0;
2347

    
2348
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2349
                          need_subpage);
2350
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2351
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2352
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2353
                                           &p->phys_offset, orig_memory,
2354
                                           p->region_offset);
2355
                } else {
2356
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2357
                                            >> IO_MEM_SHIFT];
2358
                }
2359
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2360
                                 region_offset);
2361
                p->region_offset = 0;
2362
            } else {
2363
                p->phys_offset = phys_offset;
2364
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2365
                    (phys_offset & IO_MEM_ROMD))
2366
                    phys_offset += TARGET_PAGE_SIZE;
2367
            }
2368
        } else {
2369
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2370
            p->phys_offset = phys_offset;
2371
            p->region_offset = region_offset;
2372
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2373
                (phys_offset & IO_MEM_ROMD)) {
2374
                phys_offset += TARGET_PAGE_SIZE;
2375
            } else {
2376
                target_phys_addr_t start_addr2, end_addr2;
2377
                int need_subpage = 0;
2378

    
2379
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2380
                              end_addr2, need_subpage);
2381

    
2382
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2383
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2384
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2385
                                           addr & TARGET_PAGE_MASK);
2386
                    subpage_register(subpage, start_addr2, end_addr2,
2387
                                     phys_offset, region_offset);
2388
                    p->region_offset = 0;
2389
                }
2390
            }
2391
        }
2392
        region_offset += TARGET_PAGE_SIZE;
2393
    }
2394

    
2395
    /* since each CPU stores ram addresses in its TLB cache, we must
2396
       reset the modified entries */
2397
    /* XXX: slow ! */
2398
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2399
        tlb_flush(env, 1);
2400
    }
2401
}
2402
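
/* Illustrative usage sketch (not from the original source): a board model
 * is assumed to allocate RAM and map an MMIO page roughly as follows; the
 * cpu_register_physical_memory() wrapper is assumed to forward here with a
 * region_offset of 0, and my_read/my_write/s are hypothetical.
 *
 *     ram_addr_t ram = qemu_ram_alloc(0x00400000);
 *     cpu_register_physical_memory(0x00000000, 0x00400000, ram | IO_MEM_RAM);
 *
 *     int io = cpu_register_io_memory(my_read, my_write, s);
 *     cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
 *                                         io, 0);
 */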

    
2403
/* XXX: temporary until new memory mapping API */
2404
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2405
{
2406
    PhysPageDesc *p;
2407

    
2408
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2409
    if (!p)
2410
        return IO_MEM_UNASSIGNED;
2411
    return p->phys_offset;
2412
}
2413

    
2414
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2415
{
2416
    if (kvm_enabled())
2417
        kvm_coalesce_mmio_region(addr, size);
2418
}
2419

    
2420
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2421
{
2422
    if (kvm_enabled())
2423
        kvm_uncoalesce_mmio_region(addr, size);
2424
}
2425

    
2426
#ifdef CONFIG_KQEMU
2427
/* XXX: better than nothing */
2428
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2429
{
2430
    ram_addr_t addr;
2431
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2432
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2433
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2434
        abort();
2435
    }
2436
    addr = last_ram_offset;
2437
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2438
    return addr;
2439
}
2440
#endif
2441

    
2442
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2443
{
2444
    RAMBlock *new_block;
2445

    
2446
#ifdef CONFIG_KQEMU
2447
    if (kqemu_phys_ram_base) {
2448
        return kqemu_ram_alloc(size);
2449
    }
2450
#endif
2451

    
2452
    size = TARGET_PAGE_ALIGN(size);
2453
    new_block = qemu_malloc(sizeof(*new_block));
2454

    
2455
    new_block->host = qemu_vmalloc(size);
2456
    new_block->offset = last_ram_offset;
2457
    new_block->length = size;
2458

    
2459
    new_block->next = ram_blocks;
2460
    ram_blocks = new_block;
2461

    
2462
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2463
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2464
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2465
           0xff, size >> TARGET_PAGE_BITS);
2466

    
2467
    last_ram_offset += size;
2468

    
2469
    if (kvm_enabled())
2470
        kvm_setup_guest_memory(new_block->host, size);
2471

    
2472
    return new_block->offset;
2473
}
2474

    
2475
void qemu_ram_free(ram_addr_t addr)
2476
{
2477
    /* TODO: implement this.  */
2478
}
2479

    
2480
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2481
   With the exception of the softmmu code in this file, this should
2482
   only be used for local memory (e.g. video ram) that the device owns,
2483
   and knows it isn't going to access beyond the end of the block.
2484

2485
   It should not be used for general purpose DMA.
2486
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2487
 */
2488
void *qemu_get_ram_ptr(ram_addr_t addr)
2489
{
2490
    RAMBlock *prev;
2491
    RAMBlock **prevp;
2492
    RAMBlock *block;
2493

    
2494
#ifdef CONFIG_KQEMU
2495
    if (kqemu_phys_ram_base) {
2496
        return kqemu_phys_ram_base + addr;
2497
    }
2498
#endif
2499

    
2500
    prev = NULL;
2501
    prevp = &ram_blocks;
2502
    block = ram_blocks;
2503
    while (block && (block->offset > addr
2504
                     || block->offset + block->length <= addr)) {
2505
        if (prev)
2506
          prevp = &prev->next;
2507
        prev = block;
2508
        block = block->next;
2509
    }
2510
    if (!block) {
2511
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2512
        abort();
2513
    }
2514
    /* Move this entry to the start of the list.  */
2515
    if (prev) {
2516
        prev->next = block->next;
2517
        block->next = *prevp;
2518
        *prevp = block;
2519
    }
2520
    return block->host + (addr - block->offset);
2521
}
2522
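
/* Illustrative sketch, per the restriction in the comment above: a display
 * device that allocated its VRAM with qemu_ram_alloc() may keep a host
 * pointer to it (field names are hypothetical):
 *
 *     s->vram_offset = qemu_ram_alloc(vram_size);
 *     s->vram_ptr    = qemu_get_ram_ptr(s->vram_offset);
 */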

    
2523
/* Some of the softmmu routines need to translate from a host pointer
2524
   (typically a TLB entry) back to a ram offset.  */
2525
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2526
{
2527
    RAMBlock *prev;
2528
    RAMBlock **prevp;
2529
    RAMBlock *block;
2530
    uint8_t *host = ptr;
2531

    
2532
#ifdef CONFIG_KQEMU
2533
    if (kqemu_phys_ram_base) {
2534
        return host - kqemu_phys_ram_base;
2535
    }
2536
#endif
2537

    
2538
    prev = NULL;
2539
    prevp = &ram_blocks;
2540
    block = ram_blocks;
2541
    while (block && (block->host > host
2542
                     || block->host + block->length <= host)) {
2543
        if (prev)
2544
          prevp = &prev->next;
2545
        prev = block;
2546
        block = block->next;
2547
    }
2548
    if (!block) {
2549
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2550
        abort();
2551
    }
2552
    return block->offset + (host - block->host);
2553
}
2554

    
2555
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2556
{
2557
#ifdef DEBUG_UNASSIGNED
2558
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2559
#endif
2560
#if defined(TARGET_SPARC)
2561
    do_unassigned_access(addr, 0, 0, 0, 1);
2562
#endif
2563
    return 0;
2564
}
2565

    
2566
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2567
{
2568
#ifdef DEBUG_UNASSIGNED
2569
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2570
#endif
2571
#if defined(TARGET_SPARC)
2572
    do_unassigned_access(addr, 0, 0, 0, 2);
2573
#endif
2574
    return 0;
2575
}
2576

    
2577
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2578
{
2579
#ifdef DEBUG_UNASSIGNED
2580
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2581
#endif
2582
#if defined(TARGET_SPARC)
2583
    do_unassigned_access(addr, 0, 0, 0, 4);
2584
#endif
2585
    return 0;
2586
}
2587

    
2588
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2589
{
2590
#ifdef DEBUG_UNASSIGNED
2591
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2592
#endif
2593
#if defined(TARGET_SPARC)
2594
    do_unassigned_access(addr, 1, 0, 0, 1);
2595
#endif
2596
}
2597

    
2598
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2599
{
2600
#ifdef DEBUG_UNASSIGNED
2601
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2602
#endif
2603
#if defined(TARGET_SPARC)
2604
    do_unassigned_access(addr, 1, 0, 0, 2);
2605
#endif
2606
}
2607

    
2608
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2609
{
2610
#ifdef DEBUG_UNASSIGNED
2611
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2612
#endif
2613
#if defined(TARGET_SPARC)
2614
    do_unassigned_access(addr, 1, 0, 0, 4);
2615
#endif
2616
}
2617

    
2618
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2619
    unassigned_mem_readb,
2620
    unassigned_mem_readw,
2621
    unassigned_mem_readl,
2622
};
2623

    
2624
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2625
    unassigned_mem_writeb,
2626
    unassigned_mem_writew,
2627
    unassigned_mem_writel,
2628
};
2629

    
2630
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2631
                                uint32_t val)
2632
{
2633
    int dirty_flags;
2634
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2635
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2636
#if !defined(CONFIG_USER_ONLY)
2637
        tb_invalidate_phys_page_fast(ram_addr, 1);
2638
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2639
#endif
2640
    }
2641
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2642
#ifdef CONFIG_KQEMU
2643
    if (cpu_single_env->kqemu_enabled &&
2644
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2645
        kqemu_modify_page(cpu_single_env, ram_addr);
2646
#endif
2647
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2648
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2649
    /* we remove the notdirty callback only if the code has been
2650
       flushed */
2651
    if (dirty_flags == 0xff)
2652
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2653
}
2654

    
2655
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2656
                                uint32_t val)
2657
{
2658
    int dirty_flags;
2659
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2660
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2661
#if !defined(CONFIG_USER_ONLY)
2662
        tb_invalidate_phys_page_fast(ram_addr, 2);
2663
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2664
#endif
2665
    }
2666
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2667
#ifdef CONFIG_KQEMU
2668
    if (cpu_single_env->kqemu_enabled &&
2669
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2670
        kqemu_modify_page(cpu_single_env, ram_addr);
2671
#endif
2672
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2673
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2674
    /* we remove the notdirty callback only if the code has been
2675
       flushed */
2676
    if (dirty_flags == 0xff)
2677
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2678
}
2679

    
2680
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2681
                                uint32_t val)
2682
{
2683
    int dirty_flags;
2684
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2685
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2686
#if !defined(CONFIG_USER_ONLY)
2687
        tb_invalidate_phys_page_fast(ram_addr, 4);
2688
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2689
#endif
2690
    }
2691
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2692
#ifdef CONFIG_KQEMU
2693
    if (cpu_single_env->kqemu_enabled &&
2694
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2695
        kqemu_modify_page(cpu_single_env, ram_addr);
2696
#endif
2697
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2698
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2699
    /* we remove the notdirty callback only if the code has been
2700
       flushed */
2701
    if (dirty_flags == 0xff)
2702
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2703
}
2704

    
2705
static CPUReadMemoryFunc *error_mem_read[3] = {
2706
    NULL, /* never used */
2707
    NULL, /* never used */
2708
    NULL, /* never used */
2709
};
2710

    
2711
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2712
    notdirty_mem_writeb,
2713
    notdirty_mem_writew,
2714
    notdirty_mem_writel,
2715
};
2716

    
2717
/* Generate a debug exception if a watchpoint has been hit.  */
2718
static void check_watchpoint(int offset, int len_mask, int flags)
2719
{
2720
    CPUState *env = cpu_single_env;
2721
    target_ulong pc, cs_base;
2722
    TranslationBlock *tb;
2723
    target_ulong vaddr;
2724
    CPUWatchpoint *wp;
2725
    int cpu_flags;
2726

    
2727
    if (env->watchpoint_hit) {
2728
        /* We re-entered the check after replacing the TB. Now raise
2729
         * the debug interrupt so that it will trigger after the
2730
         * current instruction. */
2731
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2732
        return;
2733
    }
2734
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2735
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2736
        if ((vaddr == (wp->vaddr & len_mask) ||
2737
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2738
            wp->flags |= BP_WATCHPOINT_HIT;
2739
            if (!env->watchpoint_hit) {
2740
                env->watchpoint_hit = wp;
2741
                tb = tb_find_pc(env->mem_io_pc);
2742
                if (!tb) {
2743
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2744
                              "pc=%p", (void *)env->mem_io_pc);
2745
                }
2746
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2747
                tb_phys_invalidate(tb, -1);
2748
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2749
                    env->exception_index = EXCP_DEBUG;
2750
                } else {
2751
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2752
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2753
                }
2754
                cpu_resume_from_signal(env, NULL);
2755
            }
2756
        } else {
2757
            wp->flags &= ~BP_WATCHPOINT_HIT;
2758
        }
2759
    }
2760
}
2761

    
2762
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2763
   so these check for a hit then pass through to the normal out-of-line
2764
   phys routines.  */
2765
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2766
{
2767
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2768
    return ldub_phys(addr);
2769
}
2770

    
2771
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2772
{
2773
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2774
    return lduw_phys(addr);
2775
}
2776

    
2777
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2778
{
2779
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2780
    return ldl_phys(addr);
2781
}
2782

    
2783
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2784
                             uint32_t val)
2785
{
2786
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2787
    stb_phys(addr, val);
2788
}
2789

    
2790
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2791
                             uint32_t val)
2792
{
2793
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2794
    stw_phys(addr, val);
2795
}
2796

    
2797
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2798
                             uint32_t val)
2799
{
2800
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2801
    stl_phys(addr, val);
2802
}
2803

    
2804
static CPUReadMemoryFunc *watch_mem_read[3] = {
2805
    watch_mem_readb,
2806
    watch_mem_readw,
2807
    watch_mem_readl,
2808
};
2809

    
2810
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2811
    watch_mem_writeb,
2812
    watch_mem_writew,
2813
    watch_mem_writel,
2814
};
2815

    
2816
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2817
                                 unsigned int len)
2818
{
2819
    uint32_t ret;
2820
    unsigned int idx;
2821

    
2822
    idx = SUBPAGE_IDX(addr);
2823
#if defined(DEBUG_SUBPAGE)
2824
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2825
           mmio, len, addr, idx);
2826
#endif
2827
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2828
                                       addr + mmio->region_offset[idx][0][len]);
2829

    
2830
    return ret;
2831
}
2832

    
2833
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2834
                              uint32_t value, unsigned int len)
2835
{
2836
    unsigned int idx;
2837

    
2838
    idx = SUBPAGE_IDX(addr);
2839
#if defined(DEBUG_SUBPAGE)
2840
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2841
           mmio, len, addr, idx, value);
2842
#endif
2843
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2844
                                  addr + mmio->region_offset[idx][1][len],
2845
                                  value);
2846
}
2847

    
2848
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2849
{
2850
#if defined(DEBUG_SUBPAGE)
2851
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2852
#endif
2853

    
2854
    return subpage_readlen(opaque, addr, 0);
2855
}
2856

    
2857
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2858
                            uint32_t value)
2859
{
2860
#if defined(DEBUG_SUBPAGE)
2861
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2862
#endif
2863
    subpage_writelen(opaque, addr, value, 0);
2864
}
2865

    
2866
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2867
{
2868
#if defined(DEBUG_SUBPAGE)
2869
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2870
#endif
2871

    
2872
    return subpage_readlen(opaque, addr, 1);
2873
}
2874

    
2875
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2876
                            uint32_t value)
2877
{
2878
#if defined(DEBUG_SUBPAGE)
2879
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2880
#endif
2881
    subpage_writelen(opaque, addr, value, 1);
2882
}
2883

    
2884
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2885
{
2886
#if defined(DEBUG_SUBPAGE)
2887
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2888
#endif
2889

    
2890
    return subpage_readlen(opaque, addr, 2);
2891
}
2892

    
2893
static void subpage_writel (void *opaque,
2894
                         target_phys_addr_t addr, uint32_t value)
2895
{
2896
#if defined(DEBUG_SUBPAGE)
2897
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2898
#endif
2899
    subpage_writelen(opaque, addr, value, 2);
2900
}
2901

    
2902
static CPUReadMemoryFunc *subpage_read[] = {
2903
    &subpage_readb,
2904
    &subpage_readw,
2905
    &subpage_readl,
2906
};
2907

    
2908
static CPUWriteMemoryFunc *subpage_write[] = {
2909
    &subpage_writeb,
2910
    &subpage_writew,
2911
    &subpage_writel,
2912
};
2913

    
2914
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2915
                             ram_addr_t memory, ram_addr_t region_offset)
2916
{
2917
    int idx, eidx;
2918
    unsigned int i;
2919

    
2920
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2921
        return -1;
2922
    idx = SUBPAGE_IDX(start);
2923
    eidx = SUBPAGE_IDX(end);
2924
#if defined(DEBUG_SUBPAGE)
2925
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2926
           mmio, start, end, idx, eidx, memory);
2927
#endif
2928
    memory >>= IO_MEM_SHIFT;
2929
    for (; idx <= eidx; idx++) {
2930
        for (i = 0; i < 4; i++) {
2931
            if (io_mem_read[memory][i]) {
2932
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2933
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2934
                mmio->region_offset[idx][0][i] = region_offset;
2935
            }
2936
            if (io_mem_write[memory][i]) {
2937
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2938
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2939
                mmio->region_offset[idx][1][i] = region_offset;
2940
            }
2941
        }
2942
    }
2943

    
2944
    return 0;
2945
}
2946

    
2947
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2948
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2949
{
2950
    subpage_t *mmio;
2951
    int subpage_memory;
2952

    
2953
    mmio = qemu_mallocz(sizeof(subpage_t));
2954

    
2955
    mmio->base = base;
2956
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2957
#if defined(DEBUG_SUBPAGE)
2958
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2959
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2960
#endif
2961
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2962
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2963
                         region_offset);
2964

    
2965
    return mmio;
2966
}
2967

    
2968
static int get_free_io_mem_idx(void)
2969
{
2970
    int i;
2971

    
2972
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2973
        if (!io_mem_used[i]) {
2974
            io_mem_used[i] = 1;
2975
            return i;
2976
        }
2977

    
2978
    return -1;
2979
}
2980

    
2981
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
2988
static int cpu_register_io_memory_fixed(int io_index,
2989
                                        CPUReadMemoryFunc **mem_read,
2990
                                        CPUWriteMemoryFunc **mem_write,
2991
                                        void *opaque)
2992
{
2993
    int i, subwidth = 0;
2994

    
2995
    if (io_index <= 0) {
2996
        io_index = get_free_io_mem_idx();
2997
        if (io_index == -1)
2998
            return io_index;
2999
    } else {
3000
        io_index >>= IO_MEM_SHIFT;
3001
        if (io_index >= IO_MEM_NB_ENTRIES)
3002
            return -1;
3003
    }
3004

    
3005
    for(i = 0;i < 3; i++) {
3006
        if (!mem_read[i] || !mem_write[i])
3007
            subwidth = IO_MEM_SUBWIDTH;
3008
        io_mem_read[io_index][i] = mem_read[i];
3009
        io_mem_write[io_index][i] = mem_write[i];
3010
    }
3011
    io_mem_opaque[io_index] = opaque;
3012
    return (io_index << IO_MEM_SHIFT) | subwidth;
3013
}
3014

    
3015
int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3016
                           CPUWriteMemoryFunc **mem_write,
3017
                           void *opaque)
3018
{
3019
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3020
}
3021
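
/* Illustrative sketch of a device registering its MMIO callbacks (the
 * my_dev_* handlers and the opaque pointer s are hypothetical):
 *
 *     static CPUReadMemoryFunc *my_dev_read[3] = {
 *         my_dev_readb, my_dev_readw, my_dev_readl,
 *     };
 *     static CPUWriteMemoryFunc *my_dev_write[3] = {
 *         my_dev_writeb, my_dev_writew, my_dev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(my_dev_read, my_dev_write, s);
 *     cpu_register_physical_memory(0xfc000000, TARGET_PAGE_SIZE, io);
 */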

    
3022
void cpu_unregister_io_memory(int io_table_address)
3023
{
3024
    int i;
3025
    int io_index = io_table_address >> IO_MEM_SHIFT;
3026

    
3027
    for (i=0;i < 3; i++) {
3028
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3029
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3030
    }
3031
    io_mem_opaque[io_index] = NULL;
3032
    io_mem_used[io_index] = 0;
3033
}
3034

    
3035
static void io_mem_init(void)
3036
{
3037
    int i;
3038

    
3039
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3040
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3041
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3042
    for (i=0; i<5; i++)
3043
        io_mem_used[i] = 1;
3044

    
3045
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3046
                                          watch_mem_write, NULL);
3047
#ifdef CONFIG_KQEMU
3048
    if (kqemu_phys_ram_base) {
3049
        /* alloc dirty bits array */
3050
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3051
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3052
    }
3053
#endif
3054
}
3055

    
3056
#endif /* !defined(CONFIG_USER_ONLY) */
3057

    
3058
/* physical memory access (slow version, mainly for debug) */
3059
#if defined(CONFIG_USER_ONLY)
3060
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3061
                            int len, int is_write)
3062
{
3063
    int l, flags;
3064
    target_ulong page;
3065
    void * p;
3066

    
3067
    while (len > 0) {
3068
        page = addr & TARGET_PAGE_MASK;
3069
        l = (page + TARGET_PAGE_SIZE) - addr;
3070
        if (l > len)
3071
            l = len;
3072
        flags = page_get_flags(page);
3073
        if (!(flags & PAGE_VALID))
3074
            return;
3075
        if (is_write) {
3076
            if (!(flags & PAGE_WRITE))
3077
                return;
3078
            /* XXX: this code should not depend on lock_user */
3079
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3080
                /* FIXME - should this return an error rather than just fail? */
3081
                return;
3082
            memcpy(p, buf, l);
3083
            unlock_user(p, addr, l);
3084
        } else {
3085
            if (!(flags & PAGE_READ))
3086
                return;
3087
            /* XXX: this code should not depend on lock_user */
3088
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3089
                /* FIXME - should this return an error rather than just fail? */
3090
                return;
3091
            memcpy(buf, p, l);
3092
            unlock_user(p, addr, 0);
3093
        }
3094
        len -= l;
3095
        buf += l;
3096
        addr += l;
3097
    }
3098
}
3099

    
3100
#else
3101
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3102
                            int len, int is_write)
3103
{
3104
    int l, io_index;
3105
    uint8_t *ptr;
3106
    uint32_t val;
3107
    target_phys_addr_t page;
3108
    unsigned long pd;
3109
    PhysPageDesc *p;
3110

    
3111
    while (len > 0) {
3112
        page = addr & TARGET_PAGE_MASK;
3113
        l = (page + TARGET_PAGE_SIZE) - addr;
3114
        if (l > len)
3115
            l = len;
3116
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3117
        if (!p) {
3118
            pd = IO_MEM_UNASSIGNED;
3119
        } else {
3120
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
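
/* Example (illustrative sketch, not from the original file): how a caller
 * uses cpu_physical_memory_rw() to copy a host buffer into guest physical
 * memory and read it back.  The helper name and the guest address are
 * hypothetical; only the function defined above is assumed.
 */
#if 0
static void example_phys_copy(target_phys_addr_t guest_addr)
{
    uint8_t out[16], in[16];
    int i;

    for (i = 0; i < 16; i++)
        out[i] = i;
    /* is_write = 1: copy the host buffer into guest physical memory */
    cpu_physical_memory_rw(guest_addr, out, sizeof(out), 1);
    /* is_write = 0: read the same range back into a host buffer */
    cpu_physical_memory_rw(guest_addr, in, sizeof(in), 0);
}
#endif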

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
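
/* Example (illustrative sketch): how ROM loading code can use
 * cpu_physical_memory_write_rom() to copy an image into a ROM/ROMD region
 * that a plain write would leave untouched.  The image pointer, size and
 * base address below are hypothetical.
 */
#if 0
static void example_load_rom_image(const uint8_t *image, int image_size,
                                   target_phys_addr_t rom_base)
{
    /* Unlike cpu_physical_memory_rw(), this helper also patches ROM pages,
       so it is suitable for firmware/BIOS images. */
    cpu_physical_memory_write_rom(rom_base, image, image_size);
}
#endif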

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
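
/* Example (illustrative sketch): the map-client protocol implemented above.
 * A device that could not get a mapping (e.g. because the single bounce
 * buffer is busy) registers a callback; cpu_notify_map_clients() invokes
 * and unregisters it once the bounce buffer is released.  The structure
 * and function names below are hypothetical.
 */
#if 0
typedef struct ExampleDMAState {
    target_phys_addr_t addr;
    target_phys_addr_t len;
    int pending;                /* set while a map attempt is deferred */
} ExampleDMAState;

static void example_map_retry(void *opaque)
{
    ExampleDMAState *s = opaque;

    /* Called from cpu_notify_map_clients() once a mapping is likely to
       succeed again; clear the flag and re-issue the deferred transfer. */
    s->pending = 0;
    /* ... retry cpu_physical_memory_map() for s->addr / s->len here ... */
}

static void example_defer_dma(ExampleDMAState *s)
{
    s->pending = 1;
    /* The returned handle could be kept and passed to
       cpu_unregister_map_client() if the device is reset meanwhile. */
    cpu_register_map_client(s, example_map_retry);
}
#endif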
3282

    
3283
/* Map a physical memory region into a host virtual address.
3284
 * May map a subset of the requested range, given by and returned in *plen.
3285
 * May return NULL if resources needed to perform the mapping are exhausted.
3286
 * Use only for reads OR writes - not for read-modify-write operations.
3287
 * Use cpu_register_map_client() to know when retrying the map operation is
3288
 * likely to succeed.
3289
 */
3290
void *cpu_physical_memory_map(target_phys_addr_t addr,
3291
                              target_phys_addr_t *plen,
3292
                              int is_write)
3293
{
3294
    target_phys_addr_t len = *plen;
3295
    target_phys_addr_t done = 0;
3296
    int l;
3297
    uint8_t *ret = NULL;
3298
    uint8_t *ptr;
3299
    target_phys_addr_t page;
3300
    unsigned long pd;
3301
    PhysPageDesc *p;
3302
    unsigned long addr1;
3303

    
3304
    while (len > 0) {
3305
        page = addr & TARGET_PAGE_MASK;
3306
        l = (page + TARGET_PAGE_SIZE) - addr;
3307
        if (l > len)
3308
            l = len;
3309
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3310
        if (!p) {
3311
            pd = IO_MEM_UNASSIGNED;
3312
        } else {
3313
            pd = p->phys_offset;
3314
        }
3315

    
3316
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3317
            if (done || bounce.buffer) {
3318
                break;
3319
            }
3320
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3321
            bounce.addr = addr;
3322
            bounce.len = l;
3323
            if (!is_write) {
3324
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
3325
            }
3326
            ptr = bounce.buffer;
3327
        } else {
3328
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3329
            ptr = qemu_get_ram_ptr(addr1);
3330
        }
3331
        if (!done) {
3332
            ret = ptr;
3333
        } else if (ret + done != ptr) {
3334
            break;
3335
        }
3336

    
3337
        len -= l;
3338
        addr += l;
3339
        done += l;
3340
    }
3341
    *plen = done;
3342
    return ret;
3343
}
3344

    
3345
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3346
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
3347
 * the amount of memory that was actually read or written by the caller.
3348
 */
3349
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3350
                               int is_write, target_phys_addr_t access_len)
3351
{
3352
    if (buffer != bounce.buffer) {
3353
        if (is_write) {
3354
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
3355
            while (access_len) {
3356
                unsigned l;
3357
                l = TARGET_PAGE_SIZE;
3358
                if (l > access_len)
3359
                    l = access_len;
3360
                if (!cpu_physical_memory_is_dirty(addr1)) {
3361
                    /* invalidate code */
3362
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3363
                    /* set dirty bit */
3364
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3365
                        (0xff & ~CODE_DIRTY_FLAG);
3366
                }
3367
                addr1 += l;
3368
                access_len -= l;
3369
            }
3370
        }
3371
        return;
3372
    }
3373
    if (is_write) {
3374
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3375
    }
3376
    qemu_free(bounce.buffer);
3377
    bounce.buffer = NULL;
3378
    cpu_notify_map_clients();
3379
}
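
/* Example (illustrative sketch): the map/unmap pairing described in the
 * comments above - map a guest-physical range for writing, fill it through
 * the host pointer, then unmap it so dirty bits are updated (or the bounce
 * buffer is flushed).  The helper name and the zero-fill are hypothetical.
 */
#if 0
static int example_dma_zero(target_phys_addr_t dma_addr,
                            target_phys_addr_t dma_len)
{
    target_phys_addr_t plen = dma_len;
    void *host = cpu_physical_memory_map(dma_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Resources exhausted: register a map client and retry later. */
        return -1;
    }
    memset(host, 0, plen);
    /* plen may be smaller than dma_len if the range could not be mapped
       contiguously; report the length actually written on unmap. */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return plen == dma_len ? 0 : -1;
}
#endif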

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
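
/* Example (illustrative sketch): round trip through the word-sized helpers
 * above - store a 32-bit value at a 4-byte aligned guest physical address
 * and load it back.  The address is hypothetical; both helpers require the
 * alignment noted in their comments.
 */
#if 0
static int example_stl_ldl_roundtrip(target_phys_addr_t aligned_addr)
{
    stl_phys(aligned_addr, 0xdeadbeef);   /* RAM case also sets dirty bits */
    return ldl_phys(aligned_addr) == 0xdeadbeef;
}
#endif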

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
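
/* Example (illustrative sketch): how a debugger front end (e.g. the gdb
 * stub) reads guest virtual memory through cpu_memory_rw_debug(), which
 * translates page by page and fails if a page is unmapped.  The helper
 * name is hypothetical.
 */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    /* is_write = 0: read; returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0);
}
#endif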

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
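
/* Example (illustrative sketch): dump_exec_info() only needs a FILE* and an
 * fprintf-compatible callback, so the statistics above can be sent to
 * stderr as shown below (the monitor normally passes its own callback).
 */
#if 0
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif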

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif