exec.c @ 2f7bb878
/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
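/* sub-page support: dispatch tables indexed by the byte offset inside a
   target page and by access size, used when more than one memory region
   maps into a single target page */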
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

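/* determine the host page size and, in user mode, mark the address ranges
   already mapped by the host as reserved */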
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

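/* return the PageDesc for the given target page index, allocating the
   second level table on demand */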
static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

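/* return the PhysPageDesc for the given physical page index; if 'alloc'
   is set, missing intermediate and leaf tables are allocated and the new
   entries are initialized to IO_MEM_UNASSIGNED */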
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change once a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

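/* allocate the buffer that will hold the generated host code, taking the
   branch range constraints of the host architecture into account */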
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

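/* save/load the CPU state fields that are common to all targets */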
static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env, 0);

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);
    cpu_synchronize_state(env, 1);

    return 0;
}
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

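/* In the page and jump lists, the two low order bits of each TB pointer
   encode which of the TB's (up to two) pages or jump slots the link
   belongs to; the value 2 marks the head of the circular jump list. */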
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

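/* set 'len' bits starting at bit index 'start' in the bitmap 'tab' */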
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

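/* build a bitmap of the bytes of the page that are covered by TBs, so that
   writes which do not touch translated code can be ignored */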
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

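/* generate a new TB for the code starting at 'pc'; if the code buffer is
   full, flush it and retry */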
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

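/* unchain the jump 'n' of 'tb' and recursively unchain the TB it was
   jumping to */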
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
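/* invalidate the TBs containing the given pc so that the code is
   retranslated and the breakpoint is taken into account */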
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

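/* unlink the currently executing TB from the chained TBs so that the CPU
   returns to the main loop as soon as possible */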
static void cpu_unlink_tb(CPUState *env)
{
#if defined(CONFIG_USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it is halted.
     */
1551
    if (!qemu_cpu_self(env)) {
1552
        qemu_cpu_kick(env);
1553
        return;
1554
    }
1555
#endif
1556

    
1557
    if (use_icount) {
1558
        env->icount_decr.u16.high = 0xffff;
1559
#ifndef CONFIG_USER_ONLY
1560
        if (!can_do_io(env)
1561
            && (mask & ~old_mask) != 0) {
1562
            cpu_abort(env, "Raised interrupt while not in I/O function");
1563
        }
1564
#endif
1565
    } else {
1566
        cpu_unlink_tb(env);
1567
    }
1568
}
1569

    
1570
void cpu_reset_interrupt(CPUState *env, int mask)
1571
{
1572
    env->interrupt_request &= ~mask;
1573
}
1574

    
1575
void cpu_exit(CPUState *env)
1576
{
1577
    env->exit_request = 1;
1578
    cpu_unlink_tb(env);
1579
}
1580

    
1581
const CPULogItem cpu_log_items[] = {
1582
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1583
      "show generated host assembly code for each compiled TB" },
1584
    { CPU_LOG_TB_IN_ASM, "in_asm",
1585
      "show target assembly code for each compiled TB" },
1586
    { CPU_LOG_TB_OP, "op",
1587
      "show micro ops for each compiled TB" },
1588
    { CPU_LOG_TB_OP_OPT, "op_opt",
1589
      "show micro ops "
1590
#ifdef TARGET_I386
1591
      "before eflags optimization and "
1592
#endif
1593
      "after liveness analysis" },
1594
    { CPU_LOG_INT, "int",
1595
      "show interrupts/exceptions in short format" },
1596
    { CPU_LOG_EXEC, "exec",
1597
      "show trace before each executed TB (lots of logs)" },
1598
    { CPU_LOG_TB_CPU, "cpu",
1599
      "show CPU state before block translation" },
1600
#ifdef TARGET_I386
1601
    { CPU_LOG_PCALL, "pcall",
1602
      "show protected mode far calls/returns/exceptions" },
1603
    { CPU_LOG_RESET, "cpu_reset",
1604
      "show CPU state before CPU resets" },
1605
#endif
1606
#ifdef DEBUG_IOPORT
1607
    { CPU_LOG_IOPORT, "ioport",
1608
      "show all i/o ports accesses" },
1609
#endif
1610
    { 0, NULL, NULL },
1611
};
1612

    
1613
static int cmp1(const char *s1, int n, const char *s2)
1614
{
1615
    if (strlen(s2) != n)
1616
        return 0;
1617
    return memcmp(s1, s2, n) == 0;
1618
}
1619

    
1620
/* takes a comma separated list of log masks. Return 0 if error. */
1621
int cpu_str_to_log_mask(const char *str)
1622
{
1623
    const CPULogItem *item;
1624
    int mask;
1625
    const char *p, *p1;
1626

    
1627
    p = str;
1628
    mask = 0;
1629
    for(;;) {
1630
        p1 = strchr(p, ',');
1631
        if (!p1)
1632
            p1 = p + strlen(p);
1633
        if(cmp1(p,p1-p,"all")) {
1634
                for(item = cpu_log_items; item->mask != 0; item++) {
1635
                        mask |= item->mask;
1636
                }
1637
        } else {
1638
        for(item = cpu_log_items; item->mask != 0; item++) {
1639
            if (cmp1(p, p1 - p, item->name))
1640
                goto found;
1641
        }
1642
        return 0;
1643
        }
1644
    found:
1645
        mask |= item->mask;
1646
        if (*p1 != ',')
1647
            break;
1648
        p = p1 + 1;
1649
    }
1650
    return mask;
1651
}
1652

    
1653
void cpu_abort(CPUState *env, const char *fmt, ...)
1654
{
1655
    va_list ap;
1656
    va_list ap2;
1657

    
1658
    va_start(ap, fmt);
1659
    va_copy(ap2, ap);
1660
    fprintf(stderr, "qemu: fatal: ");
1661
    vfprintf(stderr, fmt, ap);
1662
    fprintf(stderr, "\n");
1663
#ifdef TARGET_I386
1664
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1665
#else
1666
    cpu_dump_state(env, stderr, fprintf, 0);
1667
#endif
1668
    if (qemu_log_enabled()) {
1669
        qemu_log("qemu: fatal: ");
1670
        qemu_log_vprintf(fmt, ap2);
1671
        qemu_log("\n");
1672
#ifdef TARGET_I386
1673
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1674
#else
1675
        log_cpu_state(env, 0);
1676
#endif
1677
        qemu_log_flush();
1678
        qemu_log_close();
1679
    }
1680
    va_end(ap2);
1681
    va_end(ap);
1682
    abort();
1683
}
1684

    
1685
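/* Create a new CPU of the same model and copy the current register state
   into it, preserving the new CPU's own list chaining and index. */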
CPUState *cpu_copy(CPUState *env)
1686
{
1687
    CPUState *new_env = cpu_init(env->cpu_model_str);
1688
    CPUState *next_cpu = new_env->next_cpu;
1689
    int cpu_index = new_env->cpu_index;
1690
#if defined(TARGET_HAS_ICE)
1691
    CPUBreakpoint *bp;
1692
    CPUWatchpoint *wp;
1693
#endif
1694

    
1695
    memcpy(new_env, env, sizeof(CPUState));
1696

    
1697
    /* Preserve chaining and index. */
1698
    new_env->next_cpu = next_cpu;
1699
    new_env->cpu_index = cpu_index;
1700

    
1701
    /* Clone all break/watchpoints.
1702
       Note: Once we support ptrace with hw-debug register access, make sure
1703
       BP_CPU break/watchpoints are handled correctly on clone. */
1704
    TAILQ_INIT(&env->breakpoints);
1705
    TAILQ_INIT(&env->watchpoints);
1706
#if defined(TARGET_HAS_ICE)
1707
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1708
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1709
    }
1710
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1711
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1712
                              wp->flags, NULL);
1713
    }
1714
#endif
1715

    
1716
    return new_env;
1717
}
1718

    
1719
#if !defined(CONFIG_USER_ONLY)
1720

    
1721
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1722
{
1723
    unsigned int i;
1724

    
1725
    /* Discard jump cache entries for any tb which might potentially
1726
       overlap the flushed page.  */
1727
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1728
    memset (&env->tb_jmp_cache[i], 0, 
1729
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1730

    
1731
    i = tb_jmp_cache_hash_page(addr);
1732
    memset (&env->tb_jmp_cache[i], 0, 
1733
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1734
}
1735

    
1736
static CPUTLBEntry s_cputlb_empty_entry = {
1737
    .addr_read  = -1,
1738
    .addr_write = -1,
1739
    .addr_code  = -1,
1740
    .addend     = -1,
1741
};
1742

    
1743
/* NOTE: if flush_global is true, also flush global entries (not
1744
   implemented yet) */
1745
void tlb_flush(CPUState *env, int flush_global)
1746
{
1747
    int i;
1748

    
1749
#if defined(DEBUG_TLB)
1750
    printf("tlb_flush:\n");
1751
#endif
1752
    /* must reset current TB so that interrupts cannot modify the
1753
       links while we are modifying them */
1754
    env->current_tb = NULL;
1755

    
1756
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1757
        int mmu_idx;
1758
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1759
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1760
        }
1761
    }
1762

    
1763
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1764

    
1765
#ifdef CONFIG_KQEMU
1766
    if (env->kqemu_enabled) {
1767
        kqemu_flush(env, flush_global);
1768
    }
1769
#endif
1770
    tlb_flush_count++;
1771
}
1772

    
1773
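/* Invalidate a single TLB entry if its read, write or code address
   matches the given page address. */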
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1774
{
1775
    if (addr == (tlb_entry->addr_read &
1776
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1777
        addr == (tlb_entry->addr_write &
1778
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1779
        addr == (tlb_entry->addr_code &
1780
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1781
        *tlb_entry = s_cputlb_empty_entry;
1782
    }
1783
}
1784

    
1785
void tlb_flush_page(CPUState *env, target_ulong addr)
1786
{
1787
    int i;
1788
    int mmu_idx;
1789

    
1790
#if defined(DEBUG_TLB)
1791
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1792
#endif
1793
    /* must reset current TB so that interrupts cannot modify the
1794
       links while we are modifying them */
1795
    env->current_tb = NULL;
1796

    
1797
    addr &= TARGET_PAGE_MASK;
1798
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1799
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1800
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1801

    
1802
    tlb_flush_jmp_cache(env, addr);
1803

    
1804
#ifdef CONFIG_KQEMU
1805
    if (env->kqemu_enabled) {
1806
        kqemu_flush_page(env, addr);
1807
    }
1808
#endif
1809
}
1810

    
1811
/* update the TLBs so that writes to code in the virtual page 'addr'
1812
   can be detected */
1813
static void tlb_protect_code(ram_addr_t ram_addr)
1814
{
1815
    cpu_physical_memory_reset_dirty(ram_addr,
1816
                                    ram_addr + TARGET_PAGE_SIZE,
1817
                                    CODE_DIRTY_FLAG);
1818
}
1819

    
1820
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1821
   tested for self modifying code */
1822
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1823
                                    target_ulong vaddr)
1824
{
1825
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1826
}
1827

    
1828
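/* If the TLB entry maps RAM within [start, start + length), mark it
   TLB_NOTDIRTY so that the next write goes through the slow path and
   sets the dirty bits again. */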
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1829
                                         unsigned long start, unsigned long length)
1830
{
1831
    unsigned long addr;
1832
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1833
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1834
        if ((addr - start) < length) {
1835
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1836
        }
1837
    }
1838
}
1839

    
1840
/* Note: start and end must be within the same ram block.  */
1841
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1842
                                     int dirty_flags)
1843
{
1844
    CPUState *env;
1845
    unsigned long length, start1;
1846
    int i, mask, len;
1847
    uint8_t *p;
1848

    
1849
    start &= TARGET_PAGE_MASK;
1850
    end = TARGET_PAGE_ALIGN(end);
1851

    
1852
    length = end - start;
1853
    if (length == 0)
1854
        return;
1855
    len = length >> TARGET_PAGE_BITS;
1856
#ifdef CONFIG_KQEMU
1857
    /* XXX: should not depend on cpu context */
1858
    env = first_cpu;
1859
    if (env->kqemu_enabled) {
1860
        ram_addr_t addr;
1861
        addr = start;
1862
        for(i = 0; i < len; i++) {
1863
            kqemu_set_notdirty(env, addr);
1864
            addr += TARGET_PAGE_SIZE;
1865
        }
1866
    }
1867
#endif
1868
    mask = ~dirty_flags;
1869
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1870
    for(i = 0; i < len; i++)
1871
        p[i] &= mask;
1872

    
1873
    /* we modify the TLB cache so that the dirty bit will be set again
1874
       when accessing the range */
1875
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1876
    /* Check that we don't span multiple blocks - this breaks the
1877
       address comparisons below.  */
1878
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1879
            != (end - 1) - start) {
1880
        abort();
1881
    }
1882

    
1883
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1884
        int mmu_idx;
1885
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1886
            for(i = 0; i < CPU_TLB_SIZE; i++)
1887
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1888
                                      start1, length);
1889
        }
1890
    }
1891
}
1892

    
1893
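/* Enable or disable dirty memory tracking for migration; the request is
   forwarded to KVM when it is in use. */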
int cpu_physical_memory_set_dirty_tracking(int enable)
1894
{
1895
    in_migration = enable;
1896
    if (kvm_enabled()) {
1897
        return kvm_set_migration_log(enable);
1898
    }
1899
    return 0;
1900
}
1901

    
1902
int cpu_physical_memory_get_dirty_tracking(void)
1903
{
1904
    return in_migration;
1905
}
1906

    
1907
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1908
                                   target_phys_addr_t end_addr)
1909
{
1910
    int ret = 0;
1911

    
1912
    if (kvm_enabled())
1913
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1914
    return ret;
1915
}
1916

    
1917
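/* Re-add the TLB_NOTDIRTY flag to a RAM TLB entry whose underlying page
   is no longer marked dirty. */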
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1918
{
1919
    ram_addr_t ram_addr;
1920
    void *p;
1921

    
1922
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1923
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1924
            + tlb_entry->addend);
1925
        ram_addr = qemu_ram_addr_from_host(p);
1926
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1927
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1928
        }
1929
    }
1930
}
1931

    
1932
/* update the TLB according to the current state of the dirty bits */
1933
void cpu_tlb_update_dirty(CPUState *env)
1934
{
1935
    int i;
1936
    int mmu_idx;
1937
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1938
        for(i = 0; i < CPU_TLB_SIZE; i++)
1939
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1940
    }
1941
}
1942

    
1943
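/* Clear the TLB_NOTDIRTY flag from an entry matching vaddr so that
   further writes to the page take the fast path. */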
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1944
{
1945
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1946
        tlb_entry->addr_write = vaddr;
1947
}
1948

    
1949
/* update the TLB corresponding to virtual page vaddr
1950
   so that it is no longer dirty */
1951
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1952
{
1953
    int i;
1954
    int mmu_idx;
1955

    
1956
    vaddr &= TARGET_PAGE_MASK;
1957
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1958
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1959
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1960
}
1961

    
1962
/* add a new TLB entry. At most one entry for a given virtual address
1963
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1964
   (can only happen in non SOFTMMU mode for I/O pages or pages
1965
   conflicting with the host address space). */
1966
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1967
                      target_phys_addr_t paddr, int prot,
1968
                      int mmu_idx, int is_softmmu)
1969
{
1970
    PhysPageDesc *p;
1971
    unsigned long pd;
1972
    unsigned int index;
1973
    target_ulong address;
1974
    target_ulong code_address;
1975
    target_phys_addr_t addend;
1976
    int ret;
1977
    CPUTLBEntry *te;
1978
    CPUWatchpoint *wp;
1979
    target_phys_addr_t iotlb;
1980

    
1981
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1982
    if (!p) {
1983
        pd = IO_MEM_UNASSIGNED;
1984
    } else {
1985
        pd = p->phys_offset;
1986
    }
1987
#if defined(DEBUG_TLB)
1988
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1989
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1990
#endif
1991

    
1992
    ret = 0;
1993
    address = vaddr;
1994
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1995
        /* IO memory case (romd handled later) */
1996
        address |= TLB_MMIO;
1997
    }
1998
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1999
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2000
        /* Normal RAM.  */
2001
        iotlb = pd & TARGET_PAGE_MASK;
2002
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2003
            iotlb |= IO_MEM_NOTDIRTY;
2004
        else
2005
            iotlb |= IO_MEM_ROM;
2006
    } else {
2007
        /* IO handlers are currently passed a physical address.
2008
           It would be nice to pass an offset from the base address
2009
           of that region.  This would avoid having to special case RAM,
2010
           and avoid full address decoding in every device.
2011
           We can't use the high bits of pd for this because
2012
           IO_MEM_ROMD uses these as a ram address.  */
2013
        iotlb = (pd & ~TARGET_PAGE_MASK);
2014
        if (p) {
2015
            iotlb += p->region_offset;
2016
        } else {
2017
            iotlb += paddr;
2018
        }
2019
    }
2020

    
2021
    code_address = address;
2022
    /* Make accesses to pages with watchpoints go via the
2023
       watchpoint trap routines.  */
2024
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2025
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2026
            iotlb = io_mem_watch + paddr;
2027
            /* TODO: The memory case can be optimized by not trapping
2028
               reads of pages with a write breakpoint.  */
2029
            address |= TLB_MMIO;
2030
        }
2031
    }
2032

    
2033
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2034
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2035
    te = &env->tlb_table[mmu_idx][index];
2036
    te->addend = addend - vaddr;
2037
    if (prot & PAGE_READ) {
2038
        te->addr_read = address;
2039
    } else {
2040
        te->addr_read = -1;
2041
    }
2042

    
2043
    if (prot & PAGE_EXEC) {
2044
        te->addr_code = code_address;
2045
    } else {
2046
        te->addr_code = -1;
2047
    }
2048
    if (prot & PAGE_WRITE) {
2049
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2050
            (pd & IO_MEM_ROMD)) {
2051
            /* Write access calls the I/O callback.  */
2052
            te->addr_write = address | TLB_MMIO;
2053
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2054
                   !cpu_physical_memory_is_dirty(pd)) {
2055
            te->addr_write = address | TLB_NOTDIRTY;
2056
        } else {
2057
            te->addr_write = address;
2058
        }
2059
    } else {
2060
        te->addr_write = -1;
2061
    }
2062
    return ret;
2063
}
2064

    
2065
#else
2066

    
2067
void tlb_flush(CPUState *env, int flush_global)
2068
{
2069
}
2070

    
2071
void tlb_flush_page(CPUState *env, target_ulong addr)
2072
{
2073
}
2074

    
2075
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2076
                      target_phys_addr_t paddr, int prot,
2077
                      int mmu_idx, int is_softmmu)
2078
{
2079
    return 0;
2080
}
2081

    
2082
/*
2083
 * Walks guest process memory "regions" one by one
2084
 * and calls callback function 'fn' for each region.
2085
 */
2086
int walk_memory_regions(void *priv,
2087
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2088
{
2089
    unsigned long start, end;
2090
    PageDesc *p = NULL;
2091
    int i, j, prot, prot1;
2092
    int rc = 0;
2093

    
2094
    start = end = -1;
2095
    prot = 0;
2096

    
2097
    for (i = 0; i <= L1_SIZE; i++) {
2098
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2099
        for (j = 0; j < L2_SIZE; j++) {
2100
            prot1 = (p == NULL) ? 0 : p[j].flags;
2101
            /*
2102
             * "region" is one continuous chunk of memory
2103
             * that has the same protection flags set.
2104
             */
2105
            if (prot1 != prot) {
2106
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2107
                if (start != -1) {
2108
                    rc = (*fn)(priv, start, end, prot);
2109
                    /* callback can stop iteration by returning != 0 */
2110
                    if (rc != 0)
2111
                        return (rc);
2112
                }
2113
                if (prot1 != 0)
2114
                    start = end;
2115
                else
2116
                    start = -1;
2117
                prot = prot1;
2118
            }
2119
            if (p == NULL)
2120
                break;
2121
        }
2122
    }
2123
    return (rc);
2124
}
2125

    
2126
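/* walk_memory_regions() callback: print one region to the FILE passed
   in 'priv' as "start end size prot". */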
static int dump_region(void *priv, unsigned long start,
2127
    unsigned long end, unsigned long prot)
2128
{
2129
    FILE *f = (FILE *)priv;
2130

    
2131
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2132
        start, end, end - start,
2133
        ((prot & PAGE_READ) ? 'r' : '-'),
2134
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2135
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2136

    
2137
    return (0);
2138
}
2139

    
2140
/* dump memory mappings */
2141
void page_dump(FILE *f)
2142
{
2143
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2144
            "start", "end", "size", "prot");
2145
    walk_memory_regions(f, dump_region);
2146
}
2147

    
2148
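/* Return the PAGE_* flags of the page containing 'address', or 0 if the
   page is unknown. */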
int page_get_flags(target_ulong address)
2149
{
2150
    PageDesc *p;
2151

    
2152
    p = page_find(address >> TARGET_PAGE_BITS);
2153
    if (!p)
2154
        return 0;
2155
    return p->flags;
2156
}
2157

    
2158
/* modify the flags of a page and invalidate the code if
2159
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2160
   depending on PAGE_WRITE */
2161
void page_set_flags(target_ulong start, target_ulong end, int flags)
2162
{
2163
    PageDesc *p;
2164
    target_ulong addr;
2165

    
2166
    /* mmap_lock should already be held.  */
2167
    start = start & TARGET_PAGE_MASK;
2168
    end = TARGET_PAGE_ALIGN(end);
2169
    if (flags & PAGE_WRITE)
2170
        flags |= PAGE_WRITE_ORG;
2171
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2172
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2173
        /* We may be called for host regions that are outside guest
2174
           address space.  */
2175
        if (!p)
2176
            return;
2177
        /* if the write protection is set, then we invalidate the code
2178
           inside */
2179
        if (!(p->flags & PAGE_WRITE) &&
2180
            (flags & PAGE_WRITE) &&
2181
            p->first_tb) {
2182
            tb_invalidate_phys_page(addr, 0, NULL);
2183
        }
2184
        p->flags = flags;
2185
    }
2186
}
2187

    
2188
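/* Check that the guest range [start, start + len) is valid and allows the
   requested PAGE_READ/PAGE_WRITE access; pages write-protected because they
   contain translated code are unprotected on demand.  Returns 0 on success,
   -1 on failure. */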
int page_check_range(target_ulong start, target_ulong len, int flags)
2189
{
2190
    PageDesc *p;
2191
    target_ulong end;
2192
    target_ulong addr;
2193

    
2194
    if (start + len < start)
2195
        /* we've wrapped around */
2196
        return -1;
2197

    
2198
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2199
    start = start & TARGET_PAGE_MASK;
2200

    
2201
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2202
        p = page_find(addr >> TARGET_PAGE_BITS);
2203
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;
2207

    
2208
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2209
            return -1;
2210
        if (flags & PAGE_WRITE) {
2211
            if (!(p->flags & PAGE_WRITE_ORG))
2212
                return -1;
2213
            /* unprotect the page if it was put read-only because it
2214
               contains translated code */
2215
            if (!(p->flags & PAGE_WRITE)) {
2216
                if (!page_unprotect(addr, 0, NULL))
2217
                    return -1;
2218
            }
2219
            return 0;
2220
        }
2221
    }
2222
    return 0;
2223
}
2224

    
2225
/* called from signal handler: invalidate the code and unprotect the
2226
   page. Return TRUE if the fault was successfully handled. */
2227
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2228
{
2229
    unsigned int page_index, prot, pindex;
2230
    PageDesc *p, *p1;
2231
    target_ulong host_start, host_end, addr;
2232

    
2233
    /* Technically this isn't safe inside a signal handler.  However we
2234
       know this only ever happens in a synchronous SEGV handler, so in
2235
       practice it seems to be ok.  */
2236
    mmap_lock();
2237

    
2238
    host_start = address & qemu_host_page_mask;
2239
    page_index = host_start >> TARGET_PAGE_BITS;
2240
    p1 = page_find(page_index);
2241
    if (!p1) {
2242
        mmap_unlock();
2243
        return 0;
2244
    }
2245
    host_end = host_start + qemu_host_page_size;
2246
    p = p1;
2247
    prot = 0;
2248
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
2249
        prot |= p->flags;
2250
        p++;
2251
    }
2252
    /* if the page was really writable, then we change its
2253
       protection back to writable */
2254
    if (prot & PAGE_WRITE_ORG) {
2255
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2256
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2257
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2258
                     (prot & PAGE_BITS) | PAGE_WRITE);
2259
            p1[pindex].flags |= PAGE_WRITE;
2260
            /* and since the content will be modified, we must invalidate
2261
               the corresponding translated code. */
2262
            tb_invalidate_phys_page(address, pc, puc);
2263
#ifdef DEBUG_TB_CHECK
2264
            tb_invalidate_check(address);
2265
#endif
2266
            mmap_unlock();
2267
            return 1;
2268
        }
2269
    }
2270
    mmap_unlock();
2271
    return 0;
2272
}
2273

    
2274
static inline void tlb_set_dirty(CPUState *env,
2275
                                 unsigned long addr, target_ulong vaddr)
2276
{
2277
}
2278
#endif /* defined(CONFIG_USER_ONLY) */
2279

    
2280
#if !defined(CONFIG_USER_ONLY)
2281

    
2282
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2283
                             ram_addr_t memory, ram_addr_t region_offset);
2284
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2285
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2286
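/* Compute the first and last byte offsets (start_addr2/end_addr2) covered
   by the registered region inside the page at 'addr', and set need_subpage
   when the region does not span the whole page. */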
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2287
                      need_subpage)                                     \
2288
    do {                                                                \
2289
        if (addr > start_addr)                                          \
2290
            start_addr2 = 0;                                            \
2291
        else {                                                          \
2292
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2293
            if (start_addr2 > 0)                                        \
2294
                need_subpage = 1;                                       \
2295
        }                                                               \
2296
                                                                        \
2297
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2298
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2299
        else {                                                          \
2300
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2301
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2302
                need_subpage = 1;                                       \
2303
        }                                                               \
2304
    } while (0)
2305

    
2306
/* register physical memory. 'size' must be a multiple of the target
2307
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2308
   io memory page.  The address used when calling the IO function is
2309
   the offset from the start of the region, plus region_offset.  Both
2310
   start_addr and region_offset are rounded down to a page boundary
2311
   before calculating this offset.  This should not be a problem unless
2312
   the low bits of start_addr and region_offset differ.  */
2313
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2314
                                         ram_addr_t size,
2315
                                         ram_addr_t phys_offset,
2316
                                         ram_addr_t region_offset)
2317
{
2318
    target_phys_addr_t addr, end_addr;
2319
    PhysPageDesc *p;
2320
    CPUState *env;
2321
    ram_addr_t orig_size = size;
2322
    void *subpage;
2323

    
2324
#ifdef CONFIG_KQEMU
2325
    /* XXX: should not depend on cpu context */
2326
    env = first_cpu;
2327
    if (env->kqemu_enabled) {
2328
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2329
    }
2330
#endif
2331
    if (kvm_enabled())
2332
        kvm_set_phys_mem(start_addr, size, phys_offset);
2333

    
2334
    if (phys_offset == IO_MEM_UNASSIGNED) {
2335
        region_offset = start_addr;
2336
    }
2337
    region_offset &= TARGET_PAGE_MASK;
2338
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2339
    end_addr = start_addr + (target_phys_addr_t)size;
2340
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2341
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2342
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2343
            ram_addr_t orig_memory = p->phys_offset;
2344
            target_phys_addr_t start_addr2, end_addr2;
2345
            int need_subpage = 0;
2346

    
2347
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2348
                          need_subpage);
2349
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2350
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2351
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2352
                                           &p->phys_offset, orig_memory,
2353
                                           p->region_offset);
2354
                } else {
2355
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2356
                                            >> IO_MEM_SHIFT];
2357
                }
2358
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2359
                                 region_offset);
2360
                p->region_offset = 0;
2361
            } else {
2362
                p->phys_offset = phys_offset;
2363
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2364
                    (phys_offset & IO_MEM_ROMD))
2365
                    phys_offset += TARGET_PAGE_SIZE;
2366
            }
2367
        } else {
2368
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2369
            p->phys_offset = phys_offset;
2370
            p->region_offset = region_offset;
2371
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2372
                (phys_offset & IO_MEM_ROMD)) {
2373
                phys_offset += TARGET_PAGE_SIZE;
2374
            } else {
2375
                target_phys_addr_t start_addr2, end_addr2;
2376
                int need_subpage = 0;
2377

    
2378
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2379
                              end_addr2, need_subpage);
2380

    
2381
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2382
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2383
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2384
                                           addr & TARGET_PAGE_MASK);
2385
                    subpage_register(subpage, start_addr2, end_addr2,
2386
                                     phys_offset, region_offset);
2387
                    p->region_offset = 0;
2388
                }
2389
            }
2390
        }
2391
        region_offset += TARGET_PAGE_SIZE;
2392
    }
2393

    
2394
    /* since each CPU stores ram addresses in its TLB cache, we must
2395
       reset the modified entries */
2396
    /* XXX: slow ! */
2397
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2398
        tlb_flush(env, 1);
2399
    }
2400
}
2401

    
2402
/* XXX: temporary until new memory mapping API */
2403
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2404
{
2405
    PhysPageDesc *p;
2406

    
2407
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2408
    if (!p)
2409
        return IO_MEM_UNASSIGNED;
2410
    return p->phys_offset;
2411
}
2412

    
2413
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2414
{
2415
    if (kvm_enabled())
2416
        kvm_coalesce_mmio_region(addr, size);
2417
}
2418

    
2419
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2420
{
2421
    if (kvm_enabled())
2422
        kvm_uncoalesce_mmio_region(addr, size);
2423
}
2424

    
2425
#ifdef CONFIG_KQEMU
2426
/* XXX: better than nothing */
2427
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2428
{
2429
    ram_addr_t addr;
2430
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2431
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2432
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2433
        abort();
2434
    }
2435
    addr = last_ram_offset;
2436
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2437
    return addr;
2438
}
2439
#endif
2440

    
2441
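/* Allocate a new block of guest RAM and return its ram_addr_t offset; the
   dirty bitmap is grown accordingly and the new pages are marked dirty. */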
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2442
{
2443
    RAMBlock *new_block;
2444

    
2445
#ifdef CONFIG_KQEMU
2446
    if (kqemu_phys_ram_base) {
2447
        return kqemu_ram_alloc(size);
2448
    }
2449
#endif
2450

    
2451
    size = TARGET_PAGE_ALIGN(size);
2452
    new_block = qemu_malloc(sizeof(*new_block));
2453

    
2454
    new_block->host = qemu_vmalloc(size);
2455
    new_block->offset = last_ram_offset;
2456
    new_block->length = size;
2457

    
2458
    new_block->next = ram_blocks;
2459
    ram_blocks = new_block;
2460

    
2461
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2462
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2463
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2464
           0xff, size >> TARGET_PAGE_BITS);
2465

    
2466
    last_ram_offset += size;
2467

    
2468
    if (kvm_enabled())
2469
        kvm_setup_guest_memory(new_block->host, size);
2470

    
2471
    return new_block->offset;
2472
}
2473

    
2474
void qemu_ram_free(ram_addr_t addr)
2475
{
2476
    /* TODO: implement this.  */
2477
}
2478

    
2479
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2480
   With the exception of the softmmu code in this file, this should
2481
   only be used for local memory (e.g. video ram) that the device owns,
2482
   and knows it isn't going to access beyond the end of the block.
2483

2484
   It should not be used for general purpose DMA.
2485
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2486
 */
2487
void *qemu_get_ram_ptr(ram_addr_t addr)
2488
{
2489
    RAMBlock *prev;
2490
    RAMBlock **prevp;
2491
    RAMBlock *block;
2492

    
2493
#ifdef CONFIG_KQEMU
2494
    if (kqemu_phys_ram_base) {
2495
        return kqemu_phys_ram_base + addr;
2496
    }
2497
#endif
2498

    
2499
    prev = NULL;
2500
    prevp = &ram_blocks;
2501
    block = ram_blocks;
2502
    while (block && (block->offset > addr
2503
                     || block->offset + block->length <= addr)) {
2504
        if (prev)
2505
          prevp = &prev->next;
2506
        prev = block;
2507
        block = block->next;
2508
    }
2509
    if (!block) {
2510
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2511
        abort();
2512
    }
2513
    /* Move this entry to the start of the list.  */
2514
    if (prev) {
2515
        prev->next = block->next;
2516
        block->next = *prevp;
2517
        *prevp = block;
2518
    }
2519
    return block->host + (addr - block->offset);
2520
}
2521

    
2522
/* Some of the softmmu routines need to translate from a host pointer
2523
   (typically a TLB entry) back to a ram offset.  */
2524
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2525
{
2526
    RAMBlock *prev;
2527
    RAMBlock **prevp;
2528
    RAMBlock *block;
2529
    uint8_t *host = ptr;
2530

    
2531
#ifdef CONFIG_KQEMU
2532
    if (kqemu_phys_ram_base) {
2533
        return host - kqemu_phys_ram_base;
2534
    }
2535
#endif
2536

    
2537
    prev = NULL;
2538
    prevp = &ram_blocks;
2539
    block = ram_blocks;
2540
    while (block && (block->host > host
2541
                     || block->host + block->length <= host)) {
2542
        if (prev)
2543
          prevp = &prev->next;
2544
        prev = block;
2545
        block = block->next;
2546
    }
2547
    if (!block) {
2548
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2549
        abort();
2550
    }
2551
    return block->offset + (host - block->host);
2552
}
2553

    
2554
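/* Handlers for accesses to unassigned physical memory: optionally logged
   (DEBUG_UNASSIGNED), reported as an unassigned access on SPARC targets;
   reads return 0. */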
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2555
{
2556
#ifdef DEBUG_UNASSIGNED
2557
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2558
#endif
2559
#if defined(TARGET_SPARC)
2560
    do_unassigned_access(addr, 0, 0, 0, 1);
2561
#endif
2562
    return 0;
2563
}
2564

    
2565
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2566
{
2567
#ifdef DEBUG_UNASSIGNED
2568
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2569
#endif
2570
#if defined(TARGET_SPARC)
2571
    do_unassigned_access(addr, 0, 0, 0, 2);
2572
#endif
2573
    return 0;
2574
}
2575

    
2576
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2577
{
2578
#ifdef DEBUG_UNASSIGNED
2579
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2580
#endif
2581
#if defined(TARGET_SPARC)
2582
    do_unassigned_access(addr, 0, 0, 0, 4);
2583
#endif
2584
    return 0;
2585
}
2586

    
2587
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2588
{
2589
#ifdef DEBUG_UNASSIGNED
2590
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2591
#endif
2592
#if defined(TARGET_SPARC)
2593
    do_unassigned_access(addr, 1, 0, 0, 1);
2594
#endif
2595
}
2596

    
2597
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2598
{
2599
#ifdef DEBUG_UNASSIGNED
2600
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2601
#endif
2602
#if defined(TARGET_SPARC)
2603
    do_unassigned_access(addr, 1, 0, 0, 2);
2604
#endif
2605
}
2606

    
2607
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2608
{
2609
#ifdef DEBUG_UNASSIGNED
2610
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2611
#endif
2612
#if defined(TARGET_SPARC)
2613
    do_unassigned_access(addr, 1, 0, 0, 4);
2614
#endif
2615
}
2616

    
2617
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2618
    unassigned_mem_readb,
2619
    unassigned_mem_readw,
2620
    unassigned_mem_readl,
2621
};
2622

    
2623
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2624
    unassigned_mem_writeb,
2625
    unassigned_mem_writew,
2626
    unassigned_mem_writel,
2627
};
2628

    
2629
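/* Write handlers for RAM pages that still contain translated code
   (IO_MEM_NOTDIRTY): invalidate any TBs on the page, perform the store,
   update the dirty flags and re-enable the fast path once no translated
   code remains on the page. */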
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2630
                                uint32_t val)
2631
{
2632
    int dirty_flags;
2633
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2634
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2635
#if !defined(CONFIG_USER_ONLY)
2636
        tb_invalidate_phys_page_fast(ram_addr, 1);
2637
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2638
#endif
2639
    }
2640
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2641
#ifdef CONFIG_KQEMU
2642
    if (cpu_single_env->kqemu_enabled &&
2643
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2644
        kqemu_modify_page(cpu_single_env, ram_addr);
2645
#endif
2646
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2647
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2648
    /* we remove the notdirty callback only if the code has been
2649
       flushed */
2650
    if (dirty_flags == 0xff)
2651
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2652
}
2653

    
2654
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2655
                                uint32_t val)
2656
{
2657
    int dirty_flags;
2658
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2659
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2660
#if !defined(CONFIG_USER_ONLY)
2661
        tb_invalidate_phys_page_fast(ram_addr, 2);
2662
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2663
#endif
2664
    }
2665
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2666
#ifdef CONFIG_KQEMU
2667
    if (cpu_single_env->kqemu_enabled &&
2668
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2669
        kqemu_modify_page(cpu_single_env, ram_addr);
2670
#endif
2671
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2672
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2673
    /* we remove the notdirty callback only if the code has been
2674
       flushed */
2675
    if (dirty_flags == 0xff)
2676
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2677
}
2678

    
2679
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2680
                                uint32_t val)
2681
{
2682
    int dirty_flags;
2683
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2684
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2685
#if !defined(CONFIG_USER_ONLY)
2686
        tb_invalidate_phys_page_fast(ram_addr, 4);
2687
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2688
#endif
2689
    }
2690
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2691
#ifdef CONFIG_KQEMU
2692
    if (cpu_single_env->kqemu_enabled &&
2693
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2694
        kqemu_modify_page(cpu_single_env, ram_addr);
2695
#endif
2696
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2697
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2698
    /* we remove the notdirty callback only if the code has been
2699
       flushed */
2700
    if (dirty_flags == 0xff)
2701
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2702
}
2703

    
2704
static CPUReadMemoryFunc *error_mem_read[3] = {
2705
    NULL, /* never used */
2706
    NULL, /* never used */
2707
    NULL, /* never used */
2708
};
2709

    
2710
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2711
    notdirty_mem_writeb,
2712
    notdirty_mem_writew,
2713
    notdirty_mem_writel,
2714
};
2715

    
2716
/* Generate a debug exception if a watchpoint has been hit.  */
2717
static void check_watchpoint(int offset, int len_mask, int flags)
2718
{
2719
    CPUState *env = cpu_single_env;
2720
    target_ulong pc, cs_base;
2721
    TranslationBlock *tb;
2722
    target_ulong vaddr;
2723
    CPUWatchpoint *wp;
2724
    int cpu_flags;
2725

    
2726
    if (env->watchpoint_hit) {
2727
        /* We re-entered the check after replacing the TB. Now raise
2728
         * the debug interrupt so that it will trigger after the
2729
         * current instruction. */
2730
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2731
        return;
2732
    }
2733
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2734
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2735
        if ((vaddr == (wp->vaddr & len_mask) ||
2736
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2737
            wp->flags |= BP_WATCHPOINT_HIT;
2738
            if (!env->watchpoint_hit) {
2739
                env->watchpoint_hit = wp;
2740
                tb = tb_find_pc(env->mem_io_pc);
2741
                if (!tb) {
2742
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2743
                              "pc=%p", (void *)env->mem_io_pc);
2744
                }
2745
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2746
                tb_phys_invalidate(tb, -1);
2747
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2748
                    env->exception_index = EXCP_DEBUG;
2749
                } else {
2750
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2751
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2752
                }
2753
                cpu_resume_from_signal(env, NULL);
2754
            }
2755
        } else {
2756
            wp->flags &= ~BP_WATCHPOINT_HIT;
2757
        }
2758
    }
2759
}
2760

    
2761
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2762
   so these check for a hit then pass through to the normal out-of-line
2763
   phys routines.  */
2764
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2765
{
2766
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2767
    return ldub_phys(addr);
2768
}
2769

    
2770
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2771
{
2772
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2773
    return lduw_phys(addr);
2774
}
2775

    
2776
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2777
{
2778
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2779
    return ldl_phys(addr);
2780
}
2781

    
2782
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2783
                             uint32_t val)
2784
{
2785
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2786
    stb_phys(addr, val);
2787
}
2788

    
2789
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2790
                             uint32_t val)
2791
{
2792
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2793
    stw_phys(addr, val);
2794
}
2795

    
2796
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2797
                             uint32_t val)
2798
{
2799
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2800
    stl_phys(addr, val);
2801
}
2802

    
2803
static CPUReadMemoryFunc *watch_mem_read[3] = {
2804
    watch_mem_readb,
2805
    watch_mem_readw,
2806
    watch_mem_readl,
2807
};
2808

    
2809
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2810
    watch_mem_writeb,
2811
    watch_mem_writew,
2812
    watch_mem_writel,
2813
};
2814

    
2815
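/* Dispatch a subpage access of size 2^len bytes to the handler registered
   for the sub-page index of 'addr', applying that handler's region offset. */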
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2816
                                 unsigned int len)
2817
{
2818
    uint32_t ret;
2819
    unsigned int idx;
2820

    
2821
    idx = SUBPAGE_IDX(addr);
2822
#if defined(DEBUG_SUBPAGE)
2823
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2824
           mmio, len, addr, idx);
2825
#endif
2826
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2827
                                       addr + mmio->region_offset[idx][0][len]);
2828

    
2829
    return ret;
2830
}
2831

    
2832
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2833
                              uint32_t value, unsigned int len)
2834
{
2835
    unsigned int idx;
2836

    
2837
    idx = SUBPAGE_IDX(addr);
2838
#if defined(DEBUG_SUBPAGE)
2839
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2840
           mmio, len, addr, idx, value);
2841
#endif
2842
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2843
                                  addr + mmio->region_offset[idx][1][len],
2844
                                  value);
2845
}
2846

    
2847
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2848
{
2849
#if defined(DEBUG_SUBPAGE)
2850
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2851
#endif
2852

    
2853
    return subpage_readlen(opaque, addr, 0);
2854
}
2855

    
2856
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2857
                            uint32_t value)
2858
{
2859
#if defined(DEBUG_SUBPAGE)
2860
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2861
#endif
2862
    subpage_writelen(opaque, addr, value, 0);
2863
}
2864

    
2865
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2866
{
2867
#if defined(DEBUG_SUBPAGE)
2868
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2869
#endif
2870

    
2871
    return subpage_readlen(opaque, addr, 1);
2872
}
2873

    
2874
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2875
                            uint32_t value)
2876
{
2877
#if defined(DEBUG_SUBPAGE)
2878
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2879
#endif
2880
    subpage_writelen(opaque, addr, value, 1);
2881
}
2882

    
2883
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2884
{
2885
#if defined(DEBUG_SUBPAGE)
2886
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2887
#endif
2888

    
2889
    return subpage_readlen(opaque, addr, 2);
2890
}
2891

    
2892
static void subpage_writel (void *opaque,
2893
                         target_phys_addr_t addr, uint32_t value)
2894
{
2895
#if defined(DEBUG_SUBPAGE)
2896
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2897
#endif
2898
    subpage_writelen(opaque, addr, value, 2);
2899
}
2900

    
2901
static CPUReadMemoryFunc *subpage_read[] = {
2902
    &subpage_readb,
2903
    &subpage_readw,
2904
    &subpage_readl,
2905
};
2906

    
2907
static CPUWriteMemoryFunc *subpage_write[] = {
2908
    &subpage_writeb,
2909
    &subpage_writew,
2910
    &subpage_writel,
2911
};
2912

    
2913
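/* Route the [start, end] byte range of a sub-page to the handlers of the
   io memory region 'memory'; each access width keeps its own handler,
   opaque pointer and region offset. */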
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2914
                             ram_addr_t memory, ram_addr_t region_offset)
2915
{
2916
    int idx, eidx;
2917
    unsigned int i;
2918

    
2919
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2920
        return -1;
2921
    idx = SUBPAGE_IDX(start);
2922
    eidx = SUBPAGE_IDX(end);
2923
#if defined(DEBUG_SUBPAGE)
2924
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2925
           mmio, start, end, idx, eidx, memory);
2926
#endif
2927
    memory >>= IO_MEM_SHIFT;
2928
    for (; idx <= eidx; idx++) {
2929
        for (i = 0; i < 4; i++) {
2930
            if (io_mem_read[memory][i]) {
2931
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2932
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2933
                mmio->region_offset[idx][0][i] = region_offset;
2934
            }
2935
            if (io_mem_write[memory][i]) {
2936
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2937
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2938
                mmio->region_offset[idx][1][i] = region_offset;
2939
            }
2940
        }
2941
    }
2942

    
2943
    return 0;
2944
}
2945

    
2946
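/* Allocate a subpage container for the page at 'base', register it as an
   io memory region, tag *phys with IO_MEM_SUBPAGE and initially map the
   whole page to 'orig_memory'. */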
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2947
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2948
{
2949
    subpage_t *mmio;
2950
    int subpage_memory;
2951

    
2952
    mmio = qemu_mallocz(sizeof(subpage_t));
2953

    
2954
    mmio->base = base;
2955
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2956
#if defined(DEBUG_SUBPAGE)
2957
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2958
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2959
#endif
2960
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2961
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2962
                         region_offset);
2963

    
2964
    return mmio;
2965
}
2966

    
2967
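/* Reserve and return the first unused io_mem table slot, or -1 if all
   IO_MEM_NB_ENTRIES slots are in use. */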
static int get_free_io_mem_idx(void)
2968
{
2969
    int i;
2970

    
2971
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2972
        if (!io_mem_used[i]) {
2973
            io_mem_used[i] = 1;
2974
            return i;
2975
        }
2976

    
2977
    return -1;
2978
}
2979

    
2980
/* mem_read and mem_write are arrays of functions containing the
2981
   function to access byte (index 0), word (index 1) and dword (index
2982
   2). Functions can be omitted with a NULL function pointer.
2983
   If io_index is non zero, the corresponding io zone is
2984
   modified. If it is zero, a new io zone is allocated. The return
2985
   value can be used with cpu_register_physical_memory(). (-1) is
2986
   returned on error. */
2987
static int cpu_register_io_memory_fixed(int io_index,
2988
                                        CPUReadMemoryFunc **mem_read,
2989
                                        CPUWriteMemoryFunc **mem_write,
2990
                                        void *opaque)
2991
{
2992
    int i, subwidth = 0;
2993

    
2994
    if (io_index <= 0) {
2995
        io_index = get_free_io_mem_idx();
2996
        if (io_index == -1)
2997
            return io_index;
2998
    } else {
2999
        io_index >>= IO_MEM_SHIFT;
3000
        if (io_index >= IO_MEM_NB_ENTRIES)
3001
            return -1;
3002
    }
3003

    
3004
    for(i = 0;i < 3; i++) {
3005
        if (!mem_read[i] || !mem_write[i])
3006
            subwidth = IO_MEM_SUBWIDTH;
3007
        io_mem_read[io_index][i] = mem_read[i];
3008
        io_mem_write[io_index][i] = mem_write[i];
3009
    }
3010
    io_mem_opaque[io_index] = opaque;
3011
    return (io_index << IO_MEM_SHIFT) | subwidth;
3012
}
3013

    
3014
int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3015
                           CPUWriteMemoryFunc **mem_write,
3016
                           void *opaque)
3017
{
3018
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3019
}
3020

    
3021
void cpu_unregister_io_memory(int io_table_address)
3022
{
3023
    int i;
3024
    int io_index = io_table_address >> IO_MEM_SHIFT;
3025

    
3026
    for (i=0;i < 3; i++) {
3027
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3028
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3029
    }
3030
    io_mem_opaque[io_index] = NULL;
3031
    io_mem_used[io_index] = 0;
3032
}
3033

    
3034
static void io_mem_init(void)
3035
{
3036
    int i;
3037

    
3038
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3039
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3040
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3041
    for (i=0; i<5; i++)
3042
        io_mem_used[i] = 1;
3043

    
3044
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3045
                                          watch_mem_write, NULL);
3046
#ifdef CONFIG_KQEMU
3047
    if (kqemu_phys_ram_base) {
3048
        /* alloc dirty bits array */
3049
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3050
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3051
    }
3052
#endif
3053
}
3054

    
3055
#endif /* !defined(CONFIG_USER_ONLY) */
3056

    
3057
/* physical memory access (slow version, mainly for debug) */
3058
#if defined(CONFIG_USER_ONLY)
3059
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3060
                            int len, int is_write)
3061
{
3062
    int l, flags;
3063
    target_ulong page;
3064
    void * p;
3065

    
3066
    while (len > 0) {
3067
        page = addr & TARGET_PAGE_MASK;
3068
        l = (page + TARGET_PAGE_SIZE) - addr;
3069
        if (l > len)
3070
            l = len;
3071
        flags = page_get_flags(page);
3072
        if (!(flags & PAGE_VALID))
3073
            return;
3074
        if (is_write) {
3075
            if (!(flags & PAGE_WRITE))
3076
                return;
3077
            /* XXX: this code should not depend on lock_user */
3078
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3079
                /* FIXME - should this return an error rather than just fail? */
3080
                return;
3081
            memcpy(p, buf, l);
3082
            unlock_user(p, addr, l);
3083
        } else {
3084
            if (!(flags & PAGE_READ))
3085
                return;
3086
            /* XXX: this code should not depend on lock_user */
3087
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3088
                /* FIXME - should this return an error rather than just fail? */
3089
                return;
3090
            memcpy(buf, p, l);
3091
            unlock_user(p, addr, 0);
3092
        }
3093
        len -= l;
3094
        buf += l;
3095
        addr += l;
3096
    }
3097
}
3098

    
3099
#else
3100
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3101
                            int len, int is_write)
3102
{
3103
    int l, io_index;
3104
    uint8_t *ptr;
3105
    uint32_t val;
3106
    target_phys_addr_t page;
3107
    unsigned long pd;
3108
    PhysPageDesc *p;
3109

    
3110
    while (len > 0) {
3111
        page = addr & TARGET_PAGE_MASK;
3112
        l = (page + TARGET_PAGE_SIZE) - addr;
3113
        if (l > len)
3114
            l = len;
3115
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3116
        if (!p) {
3117
            pd = IO_MEM_UNASSIGNED;
3118
        } else {
3119
            pd = p->phys_offset;
3120
        }
3121

    
3122
        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
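
/* Illustrative usage sketch (added commentary, not part of the original
 * code): a board init or firmware loader would typically use
 * cpu_physical_memory_write_rom() to copy an image into a region that was
 * registered as ROM, which a plain cpu_physical_memory_rw() write would
 * skip.  The names bios_data, bios_size and bios_base are hypothetical.
 *
 *     uint8_t *bios_data = ...;           // image bytes loaded from disk
 *     int bios_size = ...;                // length of the image
 *     target_phys_addr_t bios_base = ...; // guest physical load address
 *     cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
 */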

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
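
/* Illustrative usage sketch (added commentary, not part of the original
 * code): when cpu_physical_memory_map() fails because the single bounce
 * buffer is busy, a caller can register a callback that fires once the
 * buffer is released; cpu_notify_map_clients() above removes the client
 * after invoking it.  dma_retry_cb and MyDMAState are hypothetical names.
 *
 *     static void dma_retry_cb(void *opaque)
 *     {
 *         MyDMAState *s = opaque;
 *         // retry cpu_physical_memory_map() and continue the transfer
 *     }
 *
 *     if (!cpu_physical_memory_map(s->addr, &len, s->is_write))
 *         cpu_register_map_client(s, dma_retry_cb);
 */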

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
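
/* Illustrative usage sketch (added commentary, not part of the original
 * code): a typical map/unmap cycle for device DMA into guest memory.  The
 * mapped length may come back smaller than requested, and the region must
 * be unmapped with the amount actually transferred so dirty tracking and
 * code invalidation stay correct.  device_fill_buffer is a hypothetical
 * helper.
 *
 *     target_phys_addr_t len = size;
 *     void *host = cpu_physical_memory_map(guest_addr, &len, 1);
 *     if (host) {
 *         int done = device_fill_buffer(host, len);  // device writes RAM
 *         cpu_physical_memory_unmap(host, len, 1, done);
 *     }
 */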

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
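
/* Illustrative usage sketch (added commentary, not part of the original
 * code): a target MMU walk that sets an accessed/dirty flag in a guest
 * page table entry can use stl_phys_notdirty() so that this bookkeeping
 * store does not itself mark the page dirty.  pte_addr, pte and
 * PTE_ACCESSED stand in for target-specific names.
 *
 *     pte = ldl_phys(pte_addr);
 *     if (!(pte & PTE_ACCESSED))
 *         stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
 */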

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
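
/* Illustrative usage sketch (added commentary, not part of the original
 * code): debugger-style accessors such as a gdb stub or monitor go through
 * cpu_memory_rw_debug() because it translates guest virtual addresses
 * itself and may write into ROM.  vaddr is hypothetical.
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         // the virtual address is not mapped
 *     }
 */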

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif