1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37
#include "qemu-common.h"
38
#include "tcg.h"
39
#include "hw/hw.h"
40
#include "osdep.h"
41
#include "kvm.h"
42
#if defined(CONFIG_USER_ONLY)
43
#include <qemu.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81
#endif
82

    
83
static TranslationBlock *tbs;
84
int code_gen_max_blocks;
85
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86
static int nb_tbs;
87
/* any access to the tbs or the page table must use this lock */
88
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89

    
90
#if defined(__arm__) || defined(__sparc_v9__)
91
/* The prologue must be reachable with a direct jump. ARM and Sparc64
92
 have limited branch ranges (possibly also PPC) so place it in a
93
 section close to the code segment. */
94
#define code_gen_section                                \
95
    __attribute__((__section__(".gen_code")))           \
96
    __attribute__((aligned (32)))
97
#else
98
#define code_gen_section                                \
99
    __attribute__((aligned (32)))
100
#endif
101

    
102
uint8_t code_gen_prologue[1024] code_gen_section;
103
static uint8_t *code_gen_buffer;
104
static unsigned long code_gen_buffer_size;
105
/* threshold to flush the translated code buffer */
106
static unsigned long code_gen_buffer_max_size;
107
uint8_t *code_gen_ptr;
108

    
109
#if !defined(CONFIG_USER_ONLY)
110
int phys_ram_fd;
111
uint8_t *phys_ram_dirty;
112
static int in_migration;
113

    
114
typedef struct RAMBlock {
115
    uint8_t *host;
116
    ram_addr_t offset;
117
    ram_addr_t length;
118
    struct RAMBlock *next;
119
} RAMBlock;
120

    
121
static RAMBlock *ram_blocks;
122
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123
   then we can no longer assume contiguous ram offsets, and external uses
124
   of this variable will break.  */
125
ram_addr_t last_ram_offset;
126
#endif
127

    
128
CPUState *first_cpu;
129
/* current CPU in the current thread. It is only valid inside
130
   cpu_exec() */
131
CPUState *cpu_single_env;
132
/* 0 = Do not count executed instructions.
133
   1 = Precise instruction counting.
134
   2 = Adaptive rate instruction counting.  */
135
int use_icount = 0;
136
/* Current instruction counter.  While executing translated code this may
137
   include some instructions that have not yet been executed.  */
138
int64_t qemu_icount;
139

    
140
typedef struct PageDesc {
141
    /* list of TBs intersecting this ram page */
142
    TranslationBlock *first_tb;
143
    /* in order to optimize self modifying code, we count the number
144
       of lookups we do to a given page to use a bitmap */
145
    unsigned int code_write_count;
146
    uint8_t *code_bitmap;
147
#if defined(CONFIG_USER_ONLY)
148
    unsigned long flags;
149
#endif
150
} PageDesc;
151

    
152
typedef struct PhysPageDesc {
153
    /* offset in host memory of the page + io_index in the low bits */
154
    ram_addr_t phys_offset;
155
    ram_addr_t region_offset;
156
} PhysPageDesc;
157

    
158
#define L2_BITS 10
159
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160
/* XXX: this is a temporary hack for alpha target.
161
 *      In the future, this is to be replaced by a multi-level table
162
 *      to actually be able to handle the complete 64 bits address space.
163
 */
164
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
165
#else
166
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
167
#endif
168

    
169
#define L1_SIZE (1 << L1_BITS)
170
#define L2_SIZE (1 << L2_BITS)
171

    
172
unsigned long qemu_real_host_page_size;
173
unsigned long qemu_host_page_bits;
174
unsigned long qemu_host_page_size;
175
unsigned long qemu_host_page_mask;
176

    
177
/* XXX: for system emulation, it could just be an array */
178
static PageDesc *l1_map[L1_SIZE];
179
static PhysPageDesc **l1_phys_map;
180

    
181
#if !defined(CONFIG_USER_ONLY)
182
static void io_mem_init(void);
183

    
184
/* io memory support */
185
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188
static char io_mem_used[IO_MEM_NB_ENTRIES];
189
static int io_mem_watch;
190
#endif
191

    
192
/* log support */
193
static const char *logfilename = "/tmp/qemu.log";
194
FILE *logfile;
195
int loglevel;
196
static int log_append = 0;
197

    
198
/* statistics */
199
static int tlb_flush_count;
200
static int tb_flush_count;
201
static int tb_phys_invalidate_count;
202

    
203
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204
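/* dispatch tables for a target page that is split between several I/O or
   memory regions */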
typedef struct subpage_t {
205
    target_phys_addr_t base;
206
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208
    void *opaque[TARGET_PAGE_SIZE][2][4];
209
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
210
} subpage_t;
211

    
212
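/* make the host memory range [addr, addr + size) executable */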
#ifdef _WIN32
213
static void map_exec(void *addr, long size)
214
{
215
    DWORD old_protect;
216
    VirtualProtect(addr, size,
217
                   PAGE_EXECUTE_READWRITE, &old_protect);
218
    
219
}
220
#else
221
static void map_exec(void *addr, long size)
222
{
223
    unsigned long start, end, page_size;
224
    
225
    page_size = getpagesize();
226
    start = (unsigned long)addr;
227
    start &= ~(page_size - 1);
228
    
229
    end = (unsigned long)addr + size;
230
    end += page_size - 1;
231
    end &= ~(page_size - 1);
232
    
233
    mprotect((void *)start, end - start,
234
             PROT_READ | PROT_WRITE | PROT_EXEC);
235
}
236
#endif
237

    
238
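/* compute the host/target page size parameters and allocate the physical
   page map */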
static void page_init(void)
239
{
240
    /* NOTE: we can always suppose that qemu_host_page_size >=
241
       TARGET_PAGE_SIZE */
242
#ifdef _WIN32
243
    {
244
        SYSTEM_INFO system_info;
245

    
246
        GetSystemInfo(&system_info);
247
        qemu_real_host_page_size = system_info.dwPageSize;
248
    }
249
#else
250
    qemu_real_host_page_size = getpagesize();
251
#endif
252
    if (qemu_host_page_size == 0)
253
        qemu_host_page_size = qemu_real_host_page_size;
254
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
255
        qemu_host_page_size = TARGET_PAGE_SIZE;
256
    qemu_host_page_bits = 0;
257
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258
        qemu_host_page_bits++;
259
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
260
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262

    
263
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264
    {
265
        long long startaddr, endaddr;
266
        FILE *f;
267
        int n;
268

    
269
        mmap_lock();
270
        last_brk = (unsigned long)sbrk(0);
271
        f = fopen("/proc/self/maps", "r");
272
        if (f) {
273
            do {
274
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275
                if (n == 2) {
276
                    startaddr = MIN(startaddr,
277
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278
                    endaddr = MIN(endaddr,
279
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
281
                                   TARGET_PAGE_ALIGN(endaddr),
282
                                   PAGE_RESERVED); 
283
                }
284
            } while (!feof(f));
285
            fclose(f);
286
        }
287
        mmap_unlock();
288
    }
289
#endif
290
}
291

    
292
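/* return the level 1 map slot covering page 'index', or NULL if the index
   is out of range */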
static inline PageDesc **page_l1_map(target_ulong index)
293
{
294
#if TARGET_LONG_BITS > 32
295
    /* Host memory outside guest VM.  For 32-bit targets we have already
296
       excluded high addresses.  */
297
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298
        return NULL;
299
#endif
300
    return &l1_map[index >> L2_BITS];
301
}
302

    
303
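/* return the PageDesc for target page 'index', allocating the level 2 table
   on demand */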
static inline PageDesc *page_find_alloc(target_ulong index)
304
{
305
    PageDesc **lp, *p;
306
    lp = page_l1_map(index);
307
    if (!lp)
308
        return NULL;
309

    
310
    p = *lp;
311
    if (!p) {
312
        /* allocate if not found */
313
#if defined(CONFIG_USER_ONLY)
314
        size_t len = sizeof(PageDesc) * L2_SIZE;
315
        /* Don't use qemu_malloc because it may recurse.  */
316
        p = mmap(0, len, PROT_READ | PROT_WRITE,
317
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318
        *lp = p;
319
        if (h2g_valid(p)) {
320
            unsigned long addr = h2g(p);
321
            page_set_flags(addr & TARGET_PAGE_MASK,
322
                           TARGET_PAGE_ALIGN(addr + len),
323
                           PAGE_RESERVED); 
324
        }
325
#else
326
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327
        *lp = p;
328
#endif
329
    }
330
    return p + (index & (L2_SIZE - 1));
331
}
332

    
333
static inline PageDesc *page_find(target_ulong index)
334
{
335
    PageDesc **lp, *p;
336
    lp = page_l1_map(index);
337
    if (!lp)
338
        return NULL;
339

    
340
    p = *lp;
341
    if (!p)
342
        return 0;
343
    return p + (index & (L2_SIZE - 1));
344
}
345

    
346
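/* return the PhysPageDesc for physical page 'index'. Missing table levels
   are allocated when 'alloc' is non-zero, otherwise NULL is returned. */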
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347
{
348
    void **lp, **p;
349
    PhysPageDesc *pd;
350

    
351
    p = (void **)l1_phys_map;
352
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
353

    
354
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356
#endif
357
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358
    p = *lp;
359
    if (!p) {
360
        /* allocate if not found */
361
        if (!alloc)
362
            return NULL;
363
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364
        memset(p, 0, sizeof(void *) * L1_SIZE);
365
        *lp = p;
366
    }
367
#endif
368
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369
    pd = *lp;
370
    if (!pd) {
371
        int i;
372
        /* allocate if not found */
373
        if (!alloc)
374
            return NULL;
375
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376
        *lp = pd;
377
        for (i = 0; i < L2_SIZE; i++) {
378
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
379
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380
        }
381
    }
382
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383
}
384

    
385
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386
{
387
    return phys_page_find_alloc(index, 0);
388
}
389

    
390
#if !defined(CONFIG_USER_ONLY)
391
static void tlb_protect_code(ram_addr_t ram_addr);
392
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393
                                    target_ulong vaddr);
394
#define mmap_lock() do { } while(0)
395
#define mmap_unlock() do { } while(0)
396
#endif
397

    
398
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399

    
400
#if defined(CONFIG_USER_ONLY)
401
/* Currently it is not recommended to allocate big chunks of data in
402
   user mode. It will change when a dedicated libc is used */
403
#define USE_STATIC_CODE_GEN_BUFFER
404
#endif
405

    
406
#ifdef USE_STATIC_CODE_GEN_BUFFER
407
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408
#endif
409

    
410
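/* allocate the buffer that receives generated host code. On some hosts the
   buffer must lie in a specific address range so that direct calls and
   branches can reach it. */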
static void code_gen_alloc(unsigned long tb_size)
411
{
412
#ifdef USE_STATIC_CODE_GEN_BUFFER
413
    code_gen_buffer = static_code_gen_buffer;
414
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415
    map_exec(code_gen_buffer, code_gen_buffer_size);
416
#else
417
    code_gen_buffer_size = tb_size;
418
    if (code_gen_buffer_size == 0) {
419
#if defined(CONFIG_USER_ONLY)
420
        /* in user mode, phys_ram_size is not meaningful */
421
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422
#else
423
        /* XXX: needs adjustments */
424
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
425
#endif
426
    }
427
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429
    /* The code gen buffer location may have constraints depending on
430
       the host cpu and OS */
431
#if defined(__linux__) 
432
    {
433
        int flags;
434
        void *start = NULL;
435

    
436
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
437
#if defined(__x86_64__)
438
        flags |= MAP_32BIT;
439
        /* Cannot map more than that */
440
        if (code_gen_buffer_size > (800 * 1024 * 1024))
441
            code_gen_buffer_size = (800 * 1024 * 1024);
442
#elif defined(__sparc_v9__)
443
        // Map the buffer below 2G, so we can use direct calls and branches
444
        flags |= MAP_FIXED;
445
        start = (void *) 0x60000000UL;
446
        if (code_gen_buffer_size > (512 * 1024 * 1024))
447
            code_gen_buffer_size = (512 * 1024 * 1024);
448
#elif defined(__arm__)
449
        /* Map the buffer below 32M, so we can use direct calls and branches */
450
        flags |= MAP_FIXED;
451
        start = (void *) 0x01000000UL;
452
        if (code_gen_buffer_size > 16 * 1024 * 1024)
453
            code_gen_buffer_size = 16 * 1024 * 1024;
454
#endif
455
        code_gen_buffer = mmap(start, code_gen_buffer_size,
456
                               PROT_WRITE | PROT_READ | PROT_EXEC,
457
                               flags, -1, 0);
458
        if (code_gen_buffer == MAP_FAILED) {
459
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
460
            exit(1);
461
        }
462
    }
463
#elif defined(__FreeBSD__) || defined(__DragonFly__)
464
    {
465
        int flags;
466
        void *addr = NULL;
467
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
468
#if defined(__x86_64__)
469
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470
         * 0x40000000 is free */
471
        flags |= MAP_FIXED;
472
        addr = (void *)0x40000000;
473
        /* Cannot map more than that */
474
        if (code_gen_buffer_size > (800 * 1024 * 1024))
475
            code_gen_buffer_size = (800 * 1024 * 1024);
476
#endif
477
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
478
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
479
                               flags, -1, 0);
480
        if (code_gen_buffer == MAP_FAILED) {
481
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482
            exit(1);
483
        }
484
    }
485
#else
486
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
487
    map_exec(code_gen_buffer, code_gen_buffer_size);
488
#endif
489
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
490
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491
    code_gen_buffer_max_size = code_gen_buffer_size - 
492
        code_gen_max_block_size();
493
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
495
}
496

    
497
/* Must be called before using the QEMU cpus. 'tb_size' is the size
498
   (in bytes) allocated to the translation buffer. Zero means default
499
   size. */
500
void cpu_exec_init_all(unsigned long tb_size)
501
{
502
    cpu_gen_init();
503
    code_gen_alloc(tb_size);
504
    code_gen_ptr = code_gen_buffer;
505
    page_init();
506
#if !defined(CONFIG_USER_ONLY)
507
    io_mem_init();
508
#endif
509
}
510

    
511
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512

    
513
#define CPU_COMMON_SAVE_VERSION 1
514

    
515
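/* save the target-independent part of the CPU state (halted flag and
   pending interrupt requests) */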
static void cpu_common_save(QEMUFile *f, void *opaque)
516
{
517
    CPUState *env = opaque;
518

    
519
    qemu_put_be32s(f, &env->halted);
520
    qemu_put_be32s(f, &env->interrupt_request);
521
}
522

    
523
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
524
{
525
    CPUState *env = opaque;
526

    
527
    if (version_id != CPU_COMMON_SAVE_VERSION)
528
        return -EINVAL;
529

    
530
    qemu_get_be32s(f, &env->halted);
531
    qemu_get_be32s(f, &env->interrupt_request);
532
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
533
       version_id is increased. */
534
    env->interrupt_request &= ~0x01;
535
    tlb_flush(env, 1);
536

    
537
    return 0;
538
}
539
#endif
540

    
541
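/* register a new virtual CPU: assign it the next cpu_index and append it to
   the global CPU list */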
void cpu_exec_init(CPUState *env)
542
{
543
    CPUState **penv;
544
    int cpu_index;
545

    
546
#if defined(CONFIG_USER_ONLY)
547
    cpu_list_lock();
548
#endif
549
    env->next_cpu = NULL;
550
    penv = &first_cpu;
551
    cpu_index = 0;
552
    while (*penv != NULL) {
553
        penv = (CPUState **)&(*penv)->next_cpu;
554
        cpu_index++;
555
    }
556
    env->cpu_index = cpu_index;
557
    env->numa_node = 0;
558
    TAILQ_INIT(&env->breakpoints);
559
    TAILQ_INIT(&env->watchpoints);
560
    *penv = env;
561
#if defined(CONFIG_USER_ONLY)
562
    cpu_list_unlock();
563
#endif
564
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
565
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
566
                    cpu_common_save, cpu_common_load, env);
567
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
568
                    cpu_save, cpu_load, env);
569
#endif
570
}
571

    
572
static inline void invalidate_page_bitmap(PageDesc *p)
573
{
574
    if (p->code_bitmap) {
575
        qemu_free(p->code_bitmap);
576
        p->code_bitmap = NULL;
577
    }
578
    p->code_write_count = 0;
579
}
580

    
581
/* set to NULL all the 'first_tb' fields in all PageDescs */
582
static void page_flush_tb(void)
583
{
584
    int i, j;
585
    PageDesc *p;
586

    
587
    for(i = 0; i < L1_SIZE; i++) {
588
        p = l1_map[i];
589
        if (p) {
590
            for(j = 0; j < L2_SIZE; j++) {
591
                p->first_tb = NULL;
592
                invalidate_page_bitmap(p);
593
                p++;
594
            }
595
        }
596
    }
597
}
598

    
599
/* flush all the translation blocks */
600
/* XXX: tb_flush is currently not thread safe */
601
void tb_flush(CPUState *env1)
602
{
603
    CPUState *env;
604
#if defined(DEBUG_FLUSH)
605
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
606
           (unsigned long)(code_gen_ptr - code_gen_buffer),
607
           nb_tbs, nb_tbs > 0 ?
608
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
609
#endif
610
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
611
        cpu_abort(env1, "Internal error: code buffer overflow\n");
612

    
613
    nb_tbs = 0;
614

    
615
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
616
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
617
    }
618

    
619
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
620
    page_flush_tb();
621

    
622
    code_gen_ptr = code_gen_buffer;
623
    /* XXX: flush processor icache at this point if cache flush is
624
       expensive */
625
    tb_flush_count++;
626
}
627

    
628
#ifdef DEBUG_TB_CHECK
629

    
630
static void tb_invalidate_check(target_ulong address)
631
{
632
    TranslationBlock *tb;
633
    int i;
634
    address &= TARGET_PAGE_MASK;
635
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
636
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
637
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
638
                  address >= tb->pc + tb->size)) {
639
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
640
                       address, (long)tb->pc, tb->size);
641
            }
642
        }
643
    }
644
}
645

    
646
/* verify that all the pages have correct rights for code */
647
static void tb_page_check(void)
648
{
649
    TranslationBlock *tb;
650
    int i, flags1, flags2;
651

    
652
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
653
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
654
            flags1 = page_get_flags(tb->pc);
655
            flags2 = page_get_flags(tb->pc + tb->size - 1);
656
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
657
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
658
                       (long)tb->pc, tb->size, flags1, flags2);
659
            }
660
        }
661
    }
662
}
663

    
664
static void tb_jmp_check(TranslationBlock *tb)
665
{
666
    TranslationBlock *tb1;
667
    unsigned int n1;
668

    
669
    /* suppress any remaining jumps to this TB */
670
    tb1 = tb->jmp_first;
671
    for(;;) {
672
        n1 = (long)tb1 & 3;
673
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
674
        if (n1 == 2)
675
            break;
676
        tb1 = tb1->jmp_next[n1];
677
    }
678
    /* check end of list */
679
    if (tb1 != tb) {
680
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
681
    }
682
}
683

    
684
#endif
685

    
686
/* invalidate one TB */
687
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
688
                             int next_offset)
689
{
690
    TranslationBlock *tb1;
691
    for(;;) {
692
        tb1 = *ptb;
693
        if (tb1 == tb) {
694
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
695
            break;
696
        }
697
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
698
    }
699
}
700

    
701
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
702
{
703
    TranslationBlock *tb1;
704
    unsigned int n1;
705

    
706
    for(;;) {
707
        tb1 = *ptb;
708
        n1 = (long)tb1 & 3;
709
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
710
        if (tb1 == tb) {
711
            *ptb = tb1->page_next[n1];
712
            break;
713
        }
714
        ptb = &tb1->page_next[n1];
715
    }
716
}
717

    
718
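/* unlink jump 'n' of 'tb' from the circular list of jumps into the TB it
   points to */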
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
719
{
720
    TranslationBlock *tb1, **ptb;
721
    unsigned int n1;
722

    
723
    ptb = &tb->jmp_next[n];
724
    tb1 = *ptb;
725
    if (tb1) {
726
        /* find tb(n) in circular list */
727
        for(;;) {
728
            tb1 = *ptb;
729
            n1 = (long)tb1 & 3;
730
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
731
            if (n1 == n && tb1 == tb)
732
                break;
733
            if (n1 == 2) {
734
                ptb = &tb1->jmp_first;
735
            } else {
736
                ptb = &tb1->jmp_next[n1];
737
            }
738
        }
739
        /* now we can suppress tb(n) from the list */
740
        *ptb = tb->jmp_next[n];
741

    
742
        tb->jmp_next[n] = NULL;
743
    }
744
}
745

    
746
/* reset the jump entry 'n' of a TB so that it is not chained to
747
   another TB */
748
static inline void tb_reset_jump(TranslationBlock *tb, int n)
749
{
750
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
751
}
752

    
753
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
754
{
755
    CPUState *env;
756
    PageDesc *p;
757
    unsigned int h, n1;
758
    target_phys_addr_t phys_pc;
759
    TranslationBlock *tb1, *tb2;
760

    
761
    /* remove the TB from the hash list */
762
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
763
    h = tb_phys_hash_func(phys_pc);
764
    tb_remove(&tb_phys_hash[h], tb,
765
              offsetof(TranslationBlock, phys_hash_next));
766

    
767
    /* remove the TB from the page list */
768
    if (tb->page_addr[0] != page_addr) {
769
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
770
        tb_page_remove(&p->first_tb, tb);
771
        invalidate_page_bitmap(p);
772
    }
773
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
774
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
775
        tb_page_remove(&p->first_tb, tb);
776
        invalidate_page_bitmap(p);
777
    }
778

    
779
    tb_invalidated_flag = 1;
780

    
781
    /* remove the TB from the hash list */
782
    h = tb_jmp_cache_hash_func(tb->pc);
783
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
784
        if (env->tb_jmp_cache[h] == tb)
785
            env->tb_jmp_cache[h] = NULL;
786
    }
787

    
788
    /* suppress this TB from the two jump lists */
789
    tb_jmp_remove(tb, 0);
790
    tb_jmp_remove(tb, 1);
791

    
792
    /* suppress any remaining jumps to this TB */
793
    tb1 = tb->jmp_first;
794
    for(;;) {
795
        n1 = (long)tb1 & 3;
796
        if (n1 == 2)
797
            break;
798
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
799
        tb2 = tb1->jmp_next[n1];
800
        tb_reset_jump(tb1, n1);
801
        tb1->jmp_next[n1] = NULL;
802
        tb1 = tb2;
803
    }
804
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
805

    
806
    tb_phys_invalidate_count++;
807
}
808

    
809
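/* set 'len' consecutive bits starting at bit index 'start' in the bitmap 'tab' */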
static inline void set_bits(uint8_t *tab, int start, int len)
810
{
811
    int end, mask, end1;
812

    
813
    end = start + len;
814
    tab += start >> 3;
815
    mask = 0xff << (start & 7);
816
    if ((start & ~7) == (end & ~7)) {
817
        if (start < end) {
818
            mask &= ~(0xff << (end & 7));
819
            *tab |= mask;
820
        }
821
    } else {
822
        *tab++ |= mask;
823
        start = (start + 8) & ~7;
824
        end1 = end & ~7;
825
        while (start < end1) {
826
            *tab++ = 0xff;
827
            start += 8;
828
        }
829
        if (start < end) {
830
            mask = ~(0xff << (end & 7));
831
            *tab |= mask;
832
        }
833
    }
834
}
835

    
836
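/* build a bitmap of the bytes of the page covered by translated code, so
   that writes which do not touch any TB can be ignored */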
static void build_page_bitmap(PageDesc *p)
837
{
838
    int n, tb_start, tb_end;
839
    TranslationBlock *tb;
840

    
841
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
842

    
843
    tb = p->first_tb;
844
    while (tb != NULL) {
845
        n = (long)tb & 3;
846
        tb = (TranslationBlock *)((long)tb & ~3);
847
        /* NOTE: this is subtle as a TB may span two physical pages */
848
        if (n == 0) {
849
            /* NOTE: tb_end may be after the end of the page, but
850
               it is not a problem */
851
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
852
            tb_end = tb_start + tb->size;
853
            if (tb_end > TARGET_PAGE_SIZE)
854
                tb_end = TARGET_PAGE_SIZE;
855
        } else {
856
            tb_start = 0;
857
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
858
        }
859
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
860
        tb = tb->page_next[n];
861
    }
862
}
863

    
864
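/* generate a new TB for the code starting at 'pc'. If the code buffer is
   full, all TBs are flushed and the translation is retried. */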
TranslationBlock *tb_gen_code(CPUState *env,
865
                              target_ulong pc, target_ulong cs_base,
866
                              int flags, int cflags)
867
{
868
    TranslationBlock *tb;
869
    uint8_t *tc_ptr;
870
    target_ulong phys_pc, phys_page2, virt_page2;
871
    int code_gen_size;
872

    
873
    phys_pc = get_phys_addr_code(env, pc);
874
    tb = tb_alloc(pc);
875
    if (!tb) {
876
        /* flush must be done */
877
        tb_flush(env);
878
        /* cannot fail at this point */
879
        tb = tb_alloc(pc);
880
        /* Don't forget to invalidate previous TB info.  */
881
        tb_invalidated_flag = 1;
882
    }
883
    tc_ptr = code_gen_ptr;
884
    tb->tc_ptr = tc_ptr;
885
    tb->cs_base = cs_base;
886
    tb->flags = flags;
887
    tb->cflags = cflags;
888
    cpu_gen_code(env, tb, &code_gen_size);
889
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
890

    
891
    /* check next page if needed */
892
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
893
    phys_page2 = -1;
894
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
895
        phys_page2 = get_phys_addr_code(env, virt_page2);
896
    }
897
    tb_link_phys(tb, phys_pc, phys_page2);
898
    return tb;
899
}
900

    
901
/* invalidate all TBs which intersect with the target physical page
902
   starting in range [start;end[. NOTE: start and end must refer to
903
   the same physical page. 'is_cpu_write_access' should be true if called
904
   from a real cpu write access: the virtual CPU will exit the current
905
   TB if code is modified inside this TB. */
906
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
907
                                   int is_cpu_write_access)
908
{
909
    TranslationBlock *tb, *tb_next, *saved_tb;
910
    CPUState *env = cpu_single_env;
911
    target_ulong tb_start, tb_end;
912
    PageDesc *p;
913
    int n;
914
#ifdef TARGET_HAS_PRECISE_SMC
915
    int current_tb_not_found = is_cpu_write_access;
916
    TranslationBlock *current_tb = NULL;
917
    int current_tb_modified = 0;
918
    target_ulong current_pc = 0;
919
    target_ulong current_cs_base = 0;
920
    int current_flags = 0;
921
#endif /* TARGET_HAS_PRECISE_SMC */
922

    
923
    p = page_find(start >> TARGET_PAGE_BITS);
924
    if (!p)
925
        return;
926
    if (!p->code_bitmap &&
927
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
928
        is_cpu_write_access) {
929
        /* build code bitmap */
930
        build_page_bitmap(p);
931
    }
932

    
933
    /* we remove all the TBs in the range [start, end[ */
934
    /* XXX: see if in some cases it could be faster to invalidate all the code */
935
    tb = p->first_tb;
936
    while (tb != NULL) {
937
        n = (long)tb & 3;
938
        tb = (TranslationBlock *)((long)tb & ~3);
939
        tb_next = tb->page_next[n];
940
        /* NOTE: this is subtle as a TB may span two physical pages */
941
        if (n == 0) {
942
            /* NOTE: tb_end may be after the end of the page, but
943
               it is not a problem */
944
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
945
            tb_end = tb_start + tb->size;
946
        } else {
947
            tb_start = tb->page_addr[1];
948
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
949
        }
950
        if (!(tb_end <= start || tb_start >= end)) {
951
#ifdef TARGET_HAS_PRECISE_SMC
952
            if (current_tb_not_found) {
953
                current_tb_not_found = 0;
954
                current_tb = NULL;
955
                if (env->mem_io_pc) {
956
                    /* now we have a real cpu fault */
957
                    current_tb = tb_find_pc(env->mem_io_pc);
958
                }
959
            }
960
            if (current_tb == tb &&
961
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
962
                /* If we are modifying the current TB, we must stop
963
                its execution. We could be more precise by checking
964
                that the modification is after the current PC, but it
965
                would require a specialized function to partially
966
                restore the CPU state */
967

    
968
                current_tb_modified = 1;
969
                cpu_restore_state(current_tb, env,
970
                                  env->mem_io_pc, NULL);
971
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
972
                                     &current_flags);
973
            }
974
#endif /* TARGET_HAS_PRECISE_SMC */
975
            /* we need to do that to handle the case where a signal
976
               occurs while doing tb_phys_invalidate() */
977
            saved_tb = NULL;
978
            if (env) {
979
                saved_tb = env->current_tb;
980
                env->current_tb = NULL;
981
            }
982
            tb_phys_invalidate(tb, -1);
983
            if (env) {
984
                env->current_tb = saved_tb;
985
                if (env->interrupt_request && env->current_tb)
986
                    cpu_interrupt(env, env->interrupt_request);
987
            }
988
        }
989
        tb = tb_next;
990
    }
991
#if !defined(CONFIG_USER_ONLY)
992
    /* if no code remaining, no need to continue to use slow writes */
993
    if (!p->first_tb) {
994
        invalidate_page_bitmap(p);
995
        if (is_cpu_write_access) {
996
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
997
        }
998
    }
999
#endif
1000
#ifdef TARGET_HAS_PRECISE_SMC
1001
    if (current_tb_modified) {
1002
        /* we generate a block containing just the instruction
1003
           modifying the memory. It will ensure that it cannot modify
1004
           itself */
1005
        env->current_tb = NULL;
1006
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1007
        cpu_resume_from_signal(env, NULL);
1008
    }
1009
#endif
1010
}
1011

    
1012
/* len must be <= 8 and start must be a multiple of len */
1013
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1014
{
1015
    PageDesc *p;
1016
    int offset, b;
1017
#if 0
1018
    if (1) {
1019
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1020
                  cpu_single_env->mem_io_vaddr, len,
1021
                  cpu_single_env->eip,
1022
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1023
    }
1024
#endif
1025
    p = page_find(start >> TARGET_PAGE_BITS);
1026
    if (!p)
1027
        return;
1028
    if (p->code_bitmap) {
1029
        offset = start & ~TARGET_PAGE_MASK;
1030
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1031
        if (b & ((1 << len) - 1))
1032
            goto do_invalidate;
1033
    } else {
1034
    do_invalidate:
1035
        tb_invalidate_phys_page_range(start, start + len, 1);
1036
    }
1037
}
1038

    
1039
#if !defined(CONFIG_SOFTMMU)
1040
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1041
                                    unsigned long pc, void *puc)
1042
{
1043
    TranslationBlock *tb;
1044
    PageDesc *p;
1045
    int n;
1046
#ifdef TARGET_HAS_PRECISE_SMC
1047
    TranslationBlock *current_tb = NULL;
1048
    CPUState *env = cpu_single_env;
1049
    int current_tb_modified = 0;
1050
    target_ulong current_pc = 0;
1051
    target_ulong current_cs_base = 0;
1052
    int current_flags = 0;
1053
#endif
1054

    
1055
    addr &= TARGET_PAGE_MASK;
1056
    p = page_find(addr >> TARGET_PAGE_BITS);
1057
    if (!p)
1058
        return;
1059
    tb = p->first_tb;
1060
#ifdef TARGET_HAS_PRECISE_SMC
1061
    if (tb && pc != 0) {
1062
        current_tb = tb_find_pc(pc);
1063
    }
1064
#endif
1065
    while (tb != NULL) {
1066
        n = (long)tb & 3;
1067
        tb = (TranslationBlock *)((long)tb & ~3);
1068
#ifdef TARGET_HAS_PRECISE_SMC
1069
        if (current_tb == tb &&
1070
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1071
                /* If we are modifying the current TB, we must stop
1072
                   its execution. We could be more precise by checking
1073
                   that the modification is after the current PC, but it
1074
                   would require a specialized function to partially
1075
                   restore the CPU state */
1076

    
1077
            current_tb_modified = 1;
1078
            cpu_restore_state(current_tb, env, pc, puc);
1079
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1080
                                 &current_flags);
1081
        }
1082
#endif /* TARGET_HAS_PRECISE_SMC */
1083
        tb_phys_invalidate(tb, addr);
1084
        tb = tb->page_next[n];
1085
    }
1086
    p->first_tb = NULL;
1087
#ifdef TARGET_HAS_PRECISE_SMC
1088
    if (current_tb_modified) {
1089
        /* we generate a block containing just the instruction
1090
           modifying the memory. It will ensure that it cannot modify
1091
           itself */
1092
        env->current_tb = NULL;
1093
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1094
        cpu_resume_from_signal(env, puc);
1095
    }
1096
#endif
1097
}
1098
#endif
1099

    
1100
/* add the tb in the target page and protect it if necessary */
1101
static inline void tb_alloc_page(TranslationBlock *tb,
1102
                                 unsigned int n, target_ulong page_addr)
1103
{
1104
    PageDesc *p;
1105
    TranslationBlock *last_first_tb;
1106

    
1107
    tb->page_addr[n] = page_addr;
1108
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1109
    tb->page_next[n] = p->first_tb;
1110
    last_first_tb = p->first_tb;
1111
    p->first_tb = (TranslationBlock *)((long)tb | n);
1112
    invalidate_page_bitmap(p);
1113

    
1114
#if defined(TARGET_HAS_SMC) || 1
1115

    
1116
#if defined(CONFIG_USER_ONLY)
1117
    if (p->flags & PAGE_WRITE) {
1118
        target_ulong addr;
1119
        PageDesc *p2;
1120
        int prot;
1121

    
1122
        /* force the host page as non writable (writes will have a
1123
           page fault + mprotect overhead) */
1124
        page_addr &= qemu_host_page_mask;
1125
        prot = 0;
1126
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1127
            addr += TARGET_PAGE_SIZE) {
1128

    
1129
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1130
            if (!p2)
1131
                continue;
1132
            prot |= p2->flags;
1133
            p2->flags &= ~PAGE_WRITE;
1134
            page_get_flags(addr);
1135
          }
1136
        mprotect(g2h(page_addr), qemu_host_page_size,
1137
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1138
#ifdef DEBUG_TB_INVALIDATE
1139
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1140
               page_addr);
1141
#endif
1142
    }
1143
#else
1144
    /* if some code is already present, then the pages are already
1145
       protected. So we handle the case where only the first TB is
1146
       allocated in a physical page */
1147
    if (!last_first_tb) {
1148
        tlb_protect_code(page_addr);
1149
    }
1150
#endif
1151

    
1152
#endif /* TARGET_HAS_SMC */
1153
}
1154

    
1155
/* Allocate a new translation block. Flush the translation buffer if
1156
   too many translation blocks or too much generated code. */
1157
TranslationBlock *tb_alloc(target_ulong pc)
1158
{
1159
    TranslationBlock *tb;
1160

    
1161
    if (nb_tbs >= code_gen_max_blocks ||
1162
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1163
        return NULL;
1164
    tb = &tbs[nb_tbs++];
1165
    tb->pc = pc;
1166
    tb->cflags = 0;
1167
    return tb;
1168
}
1169

    
1170
void tb_free(TranslationBlock *tb)
1171
{
1172
    /* In practice this is mostly used for single-use temporary TBs.
1173
       Ignore the hard cases and just back up if this TB happens to
1174
       be the last one generated.  */
1175
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1176
        code_gen_ptr = tb->tc_ptr;
1177
        nb_tbs--;
1178
    }
1179
}
1180

    
1181
/* add a new TB and link it to the physical page tables. phys_page2 is
1182
   (-1) to indicate that only one page contains the TB. */
1183
void tb_link_phys(TranslationBlock *tb,
1184
                  target_ulong phys_pc, target_ulong phys_page2)
1185
{
1186
    unsigned int h;
1187
    TranslationBlock **ptb;
1188

    
1189
    /* Grab the mmap lock to stop another thread invalidating this TB
1190
       before we are done.  */
1191
    mmap_lock();
1192
    /* add in the physical hash table */
1193
    h = tb_phys_hash_func(phys_pc);
1194
    ptb = &tb_phys_hash[h];
1195
    tb->phys_hash_next = *ptb;
1196
    *ptb = tb;
1197

    
1198
    /* add in the page list */
1199
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1200
    if (phys_page2 != -1)
1201
        tb_alloc_page(tb, 1, phys_page2);
1202
    else
1203
        tb->page_addr[1] = -1;
1204

    
1205
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1206
    tb->jmp_next[0] = NULL;
1207
    tb->jmp_next[1] = NULL;
1208

    
1209
    /* init original jump addresses */
1210
    if (tb->tb_next_offset[0] != 0xffff)
1211
        tb_reset_jump(tb, 0);
1212
    if (tb->tb_next_offset[1] != 0xffff)
1213
        tb_reset_jump(tb, 1);
1214

    
1215
#ifdef DEBUG_TB_CHECK
1216
    tb_page_check();
1217
#endif
1218
    mmap_unlock();
1219
}
1220

    
1221
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1222
   tb[1].tc_ptr. Return NULL if not found */
1223
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1224
{
1225
    int m_min, m_max, m;
1226
    unsigned long v;
1227
    TranslationBlock *tb;
1228

    
1229
    if (nb_tbs <= 0)
1230
        return NULL;
1231
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1232
        tc_ptr >= (unsigned long)code_gen_ptr)
1233
        return NULL;
1234
    /* binary search (cf Knuth) */
1235
    m_min = 0;
1236
    m_max = nb_tbs - 1;
1237
    while (m_min <= m_max) {
1238
        m = (m_min + m_max) >> 1;
1239
        tb = &tbs[m];
1240
        v = (unsigned long)tb->tc_ptr;
1241
        if (v == tc_ptr)
1242
            return tb;
1243
        else if (tc_ptr < v) {
1244
            m_max = m - 1;
1245
        } else {
1246
            m_min = m + 1;
1247
        }
1248
    }
1249
    return &tbs[m_max];
1250
}
1251

    
1252
static void tb_reset_jump_recursive(TranslationBlock *tb);
1253

    
1254
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1255
{
1256
    TranslationBlock *tb1, *tb_next, **ptb;
1257
    unsigned int n1;
1258

    
1259
    tb1 = tb->jmp_next[n];
1260
    if (tb1 != NULL) {
1261
        /* find head of list */
1262
        for(;;) {
1263
            n1 = (long)tb1 & 3;
1264
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1265
            if (n1 == 2)
1266
                break;
1267
            tb1 = tb1->jmp_next[n1];
1268
        }
1269
        /* we are now sure that tb jumps to tb1 */
1270
        tb_next = tb1;
1271

    
1272
        /* remove tb from the jmp_first list */
1273
        ptb = &tb_next->jmp_first;
1274
        for(;;) {
1275
            tb1 = *ptb;
1276
            n1 = (long)tb1 & 3;
1277
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1278
            if (n1 == n && tb1 == tb)
1279
                break;
1280
            ptb = &tb1->jmp_next[n1];
1281
        }
1282
        *ptb = tb->jmp_next[n];
1283
        tb->jmp_next[n] = NULL;
1284

    
1285
        /* suppress the jump to next tb in generated code */
1286
        tb_reset_jump(tb, n);
1287

    
1288
        /* suppress jumps in the tb on which we could have jumped */
1289
        tb_reset_jump_recursive(tb_next);
1290
    }
1291
}
1292

    
1293
static void tb_reset_jump_recursive(TranslationBlock *tb)
1294
{
1295
    tb_reset_jump_recursive2(tb, 0);
1296
    tb_reset_jump_recursive2(tb, 1);
1297
}
1298

    
1299
#if defined(TARGET_HAS_ICE)
1300
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1301
{
1302
    target_phys_addr_t addr;
1303
    target_ulong pd;
1304
    ram_addr_t ram_addr;
1305
    PhysPageDesc *p;
1306

    
1307
    addr = cpu_get_phys_page_debug(env, pc);
1308
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1309
    if (!p) {
1310
        pd = IO_MEM_UNASSIGNED;
1311
    } else {
1312
        pd = p->phys_offset;
1313
    }
1314
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1315
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1316
}
1317
#endif
1318

    
1319
/* Add a watchpoint.  */
1320
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1321
                          int flags, CPUWatchpoint **watchpoint)
1322
{
1323
    target_ulong len_mask = ~(len - 1);
1324
    CPUWatchpoint *wp;
1325

    
1326
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1327
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1328
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1329
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1330
        return -EINVAL;
1331
    }
1332
    wp = qemu_malloc(sizeof(*wp));
1333

    
1334
    wp->vaddr = addr;
1335
    wp->len_mask = len_mask;
1336
    wp->flags = flags;
1337

    
1338
    /* keep all GDB-injected watchpoints in front */
1339
    if (flags & BP_GDB)
1340
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1341
    else
1342
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1343

    
1344
    tlb_flush_page(env, addr);
1345

    
1346
    if (watchpoint)
1347
        *watchpoint = wp;
1348
    return 0;
1349
}
1350

    
1351
/* Remove a specific watchpoint.  */
1352
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1353
                          int flags)
1354
{
1355
    target_ulong len_mask = ~(len - 1);
1356
    CPUWatchpoint *wp;
1357

    
1358
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1359
        if (addr == wp->vaddr && len_mask == wp->len_mask
1360
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1361
            cpu_watchpoint_remove_by_ref(env, wp);
1362
            return 0;
1363
        }
1364
    }
1365
    return -ENOENT;
1366
}
1367

    
1368
/* Remove a specific watchpoint by reference.  */
1369
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1370
{
1371
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1372

    
1373
    tlb_flush_page(env, watchpoint->vaddr);
1374

    
1375
    qemu_free(watchpoint);
1376
}
1377

    
1378
/* Remove all matching watchpoints.  */
1379
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1380
{
1381
    CPUWatchpoint *wp, *next;
1382

    
1383
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1384
        if (wp->flags & mask)
1385
            cpu_watchpoint_remove_by_ref(env, wp);
1386
    }
1387
}
1388

    
1389
/* Add a breakpoint.  */
1390
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1391
                          CPUBreakpoint **breakpoint)
1392
{
1393
#if defined(TARGET_HAS_ICE)
1394
    CPUBreakpoint *bp;
1395

    
1396
    bp = qemu_malloc(sizeof(*bp));
1397

    
1398
    bp->pc = pc;
1399
    bp->flags = flags;
1400

    
1401
    /* keep all GDB-injected breakpoints in front */
1402
    if (flags & BP_GDB)
1403
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1404
    else
1405
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1406

    
1407
    breakpoint_invalidate(env, pc);
1408

    
1409
    if (breakpoint)
1410
        *breakpoint = bp;
1411
    return 0;
1412
#else
1413
    return -ENOSYS;
1414
#endif
1415
}
1416

    
1417
/* Remove a specific breakpoint.  */
1418
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1419
{
1420
#if defined(TARGET_HAS_ICE)
1421
    CPUBreakpoint *bp;
1422

    
1423
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1424
        if (bp->pc == pc && bp->flags == flags) {
1425
            cpu_breakpoint_remove_by_ref(env, bp);
1426
            return 0;
1427
        }
1428
    }
1429
    return -ENOENT;
1430
#else
1431
    return -ENOSYS;
1432
#endif
1433
}
1434

    
1435
/* Remove a specific breakpoint by reference.  */
1436
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1437
{
1438
#if defined(TARGET_HAS_ICE)
1439
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1440

    
1441
    breakpoint_invalidate(env, breakpoint->pc);
1442

    
1443
    qemu_free(breakpoint);
1444
#endif
1445
}
1446

    
1447
/* Remove all matching breakpoints. */
1448
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1449
{
1450
#if defined(TARGET_HAS_ICE)
1451
    CPUBreakpoint *bp, *next;
1452

    
1453
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1454
        if (bp->flags & mask)
1455
            cpu_breakpoint_remove_by_ref(env, bp);
1456
    }
1457
#endif
1458
}
1459

    
1460
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1461
   CPU loop after each instruction */
1462
void cpu_single_step(CPUState *env, int enabled)
1463
{
1464
#if defined(TARGET_HAS_ICE)
1465
    if (env->singlestep_enabled != enabled) {
1466
        env->singlestep_enabled = enabled;
1467
        if (kvm_enabled())
1468
            kvm_update_guest_debug(env, 0);
1469
        else {
1470
            /* must flush all the translated code to avoid inconsistencies */
1471
            /* XXX: only flush what is necessary */
1472
            tb_flush(env);
1473
        }
1474
    }
1475
#endif
1476
}
1477

    
1478
/* enable or disable low-level logging */
1479
void cpu_set_log(int log_flags)
1480
{
1481
    loglevel = log_flags;
1482
    if (loglevel && !logfile) {
1483
        logfile = fopen(logfilename, log_append ? "a" : "w");
1484
        if (!logfile) {
1485
            perror(logfilename);
1486
            _exit(1);
1487
        }
1488
#if !defined(CONFIG_SOFTMMU)
1489
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1490
        {
1491
            static char logfile_buf[4096];
1492
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1493
        }
1494
#else
1495
        setvbuf(logfile, NULL, _IOLBF, 0);
1496
#endif
1497
        log_append = 1;
1498
    }
1499
    if (!loglevel && logfile) {
1500
        fclose(logfile);
1501
        logfile = NULL;
1502
    }
1503
}
1504

    
1505
void cpu_set_log_filename(const char *filename)
1506
{
1507
    logfilename = strdup(filename);
1508
    if (logfile) {
1509
        fclose(logfile);
1510
        logfile = NULL;
1511
    }
1512
    cpu_set_log(loglevel);
1513
}
1514

    
1515
static void cpu_unlink_tb(CPUState *env)
1516
{
1517
#if defined(USE_NPTL)
1518
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1519
       problem and hope the cpu will stop of its own accord.  For userspace
1520
       emulation this often isn't actually as bad as it sounds.  Often
1521
       signals are used primarily to interrupt blocking syscalls.  */
1522
#else
1523
    TranslationBlock *tb;
1524
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1525

    
1526
    tb = env->current_tb;
1527
    /* if the cpu is currently executing code, we must unlink it and
1528
       all the potentially executing TB */
1529
    if (tb && !testandset(&interrupt_lock)) {
1530
        env->current_tb = NULL;
1531
        tb_reset_jump_recursive(tb);
1532
        resetlock(&interrupt_lock);
1533
    }
1534
#endif
1535
}
1536

    
1537
/* mask must never be zero, except for A20 change call */
1538
void cpu_interrupt(CPUState *env, int mask)
1539
{
1540
    int old_mask;
1541

    
1542
    old_mask = env->interrupt_request;
1543
    env->interrupt_request |= mask;
1544

    
1545
#ifndef CONFIG_USER_ONLY
1546
    /*
1547
     * If called from iothread context, wake the target cpu in
1548
     * case it is halted.
1549
     */
1550
    if (!qemu_cpu_self(env)) {
1551
        qemu_cpu_kick(env);
1552
        return;
1553
    }
1554
#endif
1555

    
1556
    if (use_icount) {
1557
        env->icount_decr.u16.high = 0xffff;
1558
#ifndef CONFIG_USER_ONLY
1559
        if (!can_do_io(env)
1560
            && (mask & ~old_mask) != 0) {
1561
            cpu_abort(env, "Raised interrupt while not in I/O function");
1562
        }
1563
#endif
1564
    } else {
1565
        cpu_unlink_tb(env);
1566
    }
1567
}
1568

    
1569
void cpu_reset_interrupt(CPUState *env, int mask)
1570
{
1571
    env->interrupt_request &= ~mask;
1572
}
1573

    
1574
void cpu_exit(CPUState *env)
1575
{
1576
    env->exit_request = 1;
1577
    cpu_unlink_tb(env);
1578
}
1579

    
1580
const CPULogItem cpu_log_items[] = {
1581
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1582
      "show generated host assembly code for each compiled TB" },
1583
    { CPU_LOG_TB_IN_ASM, "in_asm",
1584
      "show target assembly code for each compiled TB" },
1585
    { CPU_LOG_TB_OP, "op",
1586
      "show micro ops for each compiled TB" },
1587
    { CPU_LOG_TB_OP_OPT, "op_opt",
1588
      "show micro ops "
1589
#ifdef TARGET_I386
1590
      "before eflags optimization and "
1591
#endif
1592
      "after liveness analysis" },
1593
    { CPU_LOG_INT, "int",
1594
      "show interrupts/exceptions in short format" },
1595
    { CPU_LOG_EXEC, "exec",
1596
      "show trace before each executed TB (lots of logs)" },
1597
    { CPU_LOG_TB_CPU, "cpu",
1598
      "show CPU state before block translation" },
1599
#ifdef TARGET_I386
1600
    { CPU_LOG_PCALL, "pcall",
1601
      "show protected mode far calls/returns/exceptions" },
1602
    { CPU_LOG_RESET, "cpu_reset",
1603
      "show CPU state before CPU resets" },
1604
#endif
1605
#ifdef DEBUG_IOPORT
1606
    { CPU_LOG_IOPORT, "ioport",
1607
      "show all i/o ports accesses" },
1608
#endif
1609
    { 0, NULL, NULL },
1610
};
1611

    
1612
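/* return true if the first 'n' characters of 's1' exactly match the
   NUL-terminated string 's2' */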
static int cmp1(const char *s1, int n, const char *s2)
1613
{
1614
    if (strlen(s2) != n)
1615
        return 0;
1616
    return memcmp(s1, s2, n) == 0;
1617
}
1618

    
1619
/* takes a comma-separated list of log masks. Returns 0 on error. */
1620
int cpu_str_to_log_mask(const char *str)
1621
{
1622
    const CPULogItem *item;
1623
    int mask;
1624
    const char *p, *p1;
1625

    
1626
    p = str;
1627
    mask = 0;
1628
    for(;;) {
1629
        p1 = strchr(p, ',');
1630
        if (!p1)
1631
            p1 = p + strlen(p);
1632
        if(cmp1(p,p1-p,"all")) {
1633
                for(item = cpu_log_items; item->mask != 0; item++) {
1634
                        mask |= item->mask;
1635
                }
1636
        } else {
1637
        for(item = cpu_log_items; item->mask != 0; item++) {
1638
            if (cmp1(p, p1 - p, item->name))
1639
                goto found;
1640
        }
1641
        return 0;
1642
        }
1643
    found:
1644
        mask |= item->mask;
1645
        if (*p1 != ',')
1646
            break;
1647
        p = p1 + 1;
1648
    }
1649
    return mask;
1650
}
1651

    
1652
void cpu_abort(CPUState *env, const char *fmt, ...)
1653
{
1654
    va_list ap;
1655
    va_list ap2;
1656

    
1657
    va_start(ap, fmt);
1658
    va_copy(ap2, ap);
1659
    fprintf(stderr, "qemu: fatal: ");
1660
    vfprintf(stderr, fmt, ap);
1661
    fprintf(stderr, "\n");
1662
#ifdef TARGET_I386
1663
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1664
#else
1665
    cpu_dump_state(env, stderr, fprintf, 0);
1666
#endif
1667
    if (qemu_log_enabled()) {
1668
        qemu_log("qemu: fatal: ");
1669
        qemu_log_vprintf(fmt, ap2);
1670
        qemu_log("\n");
1671
#ifdef TARGET_I386
1672
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1673
#else
1674
        log_cpu_state(env, 0);
1675
#endif
1676
        qemu_log_flush();
1677
        qemu_log_close();
1678
    }
1679
    va_end(ap2);
1680
    va_end(ap);
1681
    abort();
1682
}
1683

    
1684
CPUState *cpu_copy(CPUState *env)
1685
{
1686
    CPUState *new_env = cpu_init(env->cpu_model_str);
1687
    CPUState *next_cpu = new_env->next_cpu;
1688
    int cpu_index = new_env->cpu_index;
1689
#if defined(TARGET_HAS_ICE)
1690
    CPUBreakpoint *bp;
1691
    CPUWatchpoint *wp;
1692
#endif
1693

    
1694
    memcpy(new_env, env, sizeof(CPUState));
1695

    
1696
    /* Preserve chaining and index. */
1697
    new_env->next_cpu = next_cpu;
1698
    new_env->cpu_index = cpu_index;
1699

    
1700
    /* Clone all break/watchpoints.
1701
       Note: Once we support ptrace with hw-debug register access, make sure
1702
       BP_CPU break/watchpoints are handled correctly on clone. */
1703
    TAILQ_INIT(&new_env->breakpoints);
1704
    TAILQ_INIT(&new_env->watchpoints);
1705
#if defined(TARGET_HAS_ICE)
1706
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1707
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1708
    }
1709
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1710
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1711
                              wp->flags, NULL);
1712
    }
1713
#endif
1714

    
1715
    return new_env;
1716
}
1717

    
1718
#if !defined(CONFIG_USER_ONLY)
1719

    
1720
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1721
{
1722
    unsigned int i;
1723

    
1724
    /* Discard jump cache entries for any tb which might potentially
1725
       overlap the flushed page.  */
1726
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1727
    memset (&env->tb_jmp_cache[i], 0, 
1728
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1729

    
1730
    i = tb_jmp_cache_hash_page(addr);
1731
    memset (&env->tb_jmp_cache[i], 0, 
1732
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1733
}
1734

    
1735
/* NOTE: if flush_global is true, also flush global entries (not
1736
   implemented yet) */
1737
void tlb_flush(CPUState *env, int flush_global)
1738
{
1739
    int i;
1740

    
1741
#if defined(DEBUG_TLB)
1742
    printf("tlb_flush:\n");
1743
#endif
1744
    /* must reset current TB so that interrupts cannot modify the
1745
       links while we are modifying them */
1746
    env->current_tb = NULL;
1747

    
1748
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1749
        env->tlb_table[0][i].addr_read = -1;
1750
        env->tlb_table[0][i].addr_write = -1;
1751
        env->tlb_table[0][i].addr_code = -1;
1752
        env->tlb_table[1][i].addr_read = -1;
1753
        env->tlb_table[1][i].addr_write = -1;
1754
        env->tlb_table[1][i].addr_code = -1;
1755
#if (NB_MMU_MODES >= 3)
1756
        env->tlb_table[2][i].addr_read = -1;
1757
        env->tlb_table[2][i].addr_write = -1;
1758
        env->tlb_table[2][i].addr_code = -1;
1759
#endif
1760
#if (NB_MMU_MODES >= 4)
1761
        env->tlb_table[3][i].addr_read = -1;
1762
        env->tlb_table[3][i].addr_write = -1;
1763
        env->tlb_table[3][i].addr_code = -1;
1764
#endif
1765
#if (NB_MMU_MODES >= 5)
1766
        env->tlb_table[4][i].addr_read = -1;
1767
        env->tlb_table[4][i].addr_write = -1;
1768
        env->tlb_table[4][i].addr_code = -1;
1769
#endif
1770

    
1771
    }
1772

    
1773
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1774

    
1775
#ifdef CONFIG_KQEMU
1776
    if (env->kqemu_enabled) {
1777
        kqemu_flush(env, flush_global);
1778
    }
1779
#endif
1780
    tlb_flush_count++;
1781
}
1782

    
1783
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1784
{
1785
    if (addr == (tlb_entry->addr_read &
1786
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1787
        addr == (tlb_entry->addr_write &
1788
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1789
        addr == (tlb_entry->addr_code &
1790
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1791
        tlb_entry->addr_read = -1;
1792
        tlb_entry->addr_write = -1;
1793
        tlb_entry->addr_code = -1;
1794
    }
1795
}
1796

    
1797
void tlb_flush_page(CPUState *env, target_ulong addr)
1798
{
1799
    int i;
1800

    
1801
#if defined(DEBUG_TLB)
1802
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1803
#endif
1804
    /* must reset current TB so that interrupts cannot modify the
1805
       links while we are modifying them */
1806
    env->current_tb = NULL;
1807

    
1808
    addr &= TARGET_PAGE_MASK;
1809
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1810
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1811
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1812
#if (NB_MMU_MODES >= 3)
1813
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1814
#endif
1815
#if (NB_MMU_MODES >= 4)
1816
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1817
#endif
1818
#if (NB_MMU_MODES >= 5)
1819
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1820
#endif
1821

    
1822
    tlb_flush_jmp_cache(env, addr);
1823

    
1824
#ifdef CONFIG_KQEMU
1825
    if (env->kqemu_enabled) {
1826
        kqemu_flush_page(env, addr);
1827
    }
1828
#endif
1829
}
1830

    
1831
/* update the TLBs so that writes to code in the virtual page 'addr'
1832
   can be detected */
1833
static void tlb_protect_code(ram_addr_t ram_addr)
1834
{
1835
    cpu_physical_memory_reset_dirty(ram_addr,
1836
                                    ram_addr + TARGET_PAGE_SIZE,
1837
                                    CODE_DIRTY_FLAG);
1838
}
1839

    
1840
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1841
   tested for self modifying code */
1842
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1843
                                    target_ulong vaddr)
1844
{
1845
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1846
}
1847

    
1848
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1849
                                         unsigned long start, unsigned long length)
1850
{
1851
    unsigned long addr;
1852
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1853
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1854
        if ((addr - start) < length) {
1855
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1856
        }
1857
    }
1858
}
1859

    
1860
/* Note: start and end must be within the same ram block.  */
1861
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1862
                                     int dirty_flags)
1863
{
1864
    CPUState *env;
1865
    unsigned long length, start1;
1866
    int i, mask, len;
1867
    uint8_t *p;
1868

    
1869
    start &= TARGET_PAGE_MASK;
1870
    end = TARGET_PAGE_ALIGN(end);
1871

    
1872
    length = end - start;
1873
    if (length == 0)
1874
        return;
1875
    len = length >> TARGET_PAGE_BITS;
1876
#ifdef CONFIG_KQEMU
1877
    /* XXX: should not depend on cpu context */
1878
    env = first_cpu;
1879
    if (env->kqemu_enabled) {
1880
        ram_addr_t addr;
1881
        addr = start;
1882
        for(i = 0; i < len; i++) {
1883
            kqemu_set_notdirty(env, addr);
1884
            addr += TARGET_PAGE_SIZE;
1885
        }
1886
    }
1887
#endif
1888
    mask = ~dirty_flags;
1889
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1890
    for(i = 0; i < len; i++)
1891
        p[i] &= mask;
1892

    
1893
    /* we modify the TLB cache so that the dirty bit will be set again
1894
       when accessing the range */
1895
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1896
    /* Check that we don't span multiple blocks - this breaks the
1897
       address comparisons below.  */
1898
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1899
            != (end - 1) - start) {
1900
        abort();
1901
    }
1902

    
1903
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1904
        for(i = 0; i < CPU_TLB_SIZE; i++)
1905
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1906
        for(i = 0; i < CPU_TLB_SIZE; i++)
1907
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1908
#if (NB_MMU_MODES >= 3)
1909
        for(i = 0; i < CPU_TLB_SIZE; i++)
1910
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1911
#endif
1912
#if (NB_MMU_MODES >= 4)
1913
        for(i = 0; i < CPU_TLB_SIZE; i++)
1914
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1915
#endif
1916
#if (NB_MMU_MODES >= 5)
1917
        for(i = 0; i < CPU_TLB_SIZE; i++)
1918
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1919
#endif
1920
    }
1921
}
1922

    
1923
int cpu_physical_memory_set_dirty_tracking(int enable)
1924
{
1925
    in_migration = enable;
1926
    return 0;
1927
}
1928

    
1929
int cpu_physical_memory_get_dirty_tracking(void)
1930
{
1931
    return in_migration;
1932
}
1933

    
1934
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1935
{
1936
    if (kvm_enabled())
1937
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1938
}
1939

    
1940
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1941
{
1942
    ram_addr_t ram_addr;
1943
    void *p;
1944

    
1945
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1946
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1947
            + tlb_entry->addend);
1948
        ram_addr = qemu_ram_addr_from_host(p);
1949
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1950
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1951
        }
1952
    }
1953
}
1954

    
1955
/* update the TLB according to the current state of the dirty bits */
1956
void cpu_tlb_update_dirty(CPUState *env)
1957
{
1958
    int i;
1959
    for(i = 0; i < CPU_TLB_SIZE; i++)
1960
        tlb_update_dirty(&env->tlb_table[0][i]);
1961
    for(i = 0; i < CPU_TLB_SIZE; i++)
1962
        tlb_update_dirty(&env->tlb_table[1][i]);
1963
#if (NB_MMU_MODES >= 3)
1964
    for(i = 0; i < CPU_TLB_SIZE; i++)
1965
        tlb_update_dirty(&env->tlb_table[2][i]);
1966
#endif
1967
#if (NB_MMU_MODES >= 4)
1968
    for(i = 0; i < CPU_TLB_SIZE; i++)
1969
        tlb_update_dirty(&env->tlb_table[3][i]);
1970
#endif
1971
#if (NB_MMU_MODES >= 5)
1972
    for(i = 0; i < CPU_TLB_SIZE; i++)
1973
        tlb_update_dirty(&env->tlb_table[4][i]);
1974
#endif
1975
}
1976

    
1977
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1978
{
1979
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1980
        tlb_entry->addr_write = vaddr;
1981
}
1982

    
1983
/* update the TLB corresponding to virtual page vaddr
1984
   so that it is no longer dirty */
1985
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1986
{
1987
    int i;
1988

    
1989
    vaddr &= TARGET_PAGE_MASK;
1990
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1991
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1992
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1993
#if (NB_MMU_MODES >= 3)
1994
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1995
#endif
1996
#if (NB_MMU_MODES >= 4)
1997
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1998
#endif
1999
#if (NB_MMU_MODES >= 5)
2000
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2001
#endif
2002
}
2003

    
2004
/* add a new TLB entry. At most one entry for a given virtual address
2005
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2006
   (can only happen in non SOFTMMU mode for I/O pages or pages
2007
   conflicting with the host address space). */
2008
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2009
                      target_phys_addr_t paddr, int prot,
2010
                      int mmu_idx, int is_softmmu)
2011
{
2012
    PhysPageDesc *p;
2013
    unsigned long pd;
2014
    unsigned int index;
2015
    target_ulong address;
2016
    target_ulong code_address;
2017
    target_phys_addr_t addend;
2018
    int ret;
2019
    CPUTLBEntry *te;
2020
    CPUWatchpoint *wp;
2021
    target_phys_addr_t iotlb;
2022

    
2023
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2024
    if (!p) {
2025
        pd = IO_MEM_UNASSIGNED;
2026
    } else {
2027
        pd = p->phys_offset;
2028
    }
2029
#if defined(DEBUG_TLB)
2030
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2031
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2032
#endif
2033

    
2034
    ret = 0;
2035
    address = vaddr;
2036
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2037
        /* IO memory case (romd handled later) */
2038
        address |= TLB_MMIO;
2039
    }
2040
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2041
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2042
        /* Normal RAM.  */
2043
        iotlb = pd & TARGET_PAGE_MASK;
2044
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2045
            iotlb |= IO_MEM_NOTDIRTY;
2046
        else
2047
            iotlb |= IO_MEM_ROM;
2048
    } else {
2049
        /* IO handlers are currently passed a physical address.
2050
           It would be nice to pass an offset from the base address
2051
           of that region.  This would avoid having to special case RAM,
2052
           and avoid full address decoding in every device.
2053
           We can't use the high bits of pd for this because
2054
           IO_MEM_ROMD uses these as a ram address.  */
2055
        iotlb = (pd & ~TARGET_PAGE_MASK);
2056
        if (p) {
2057
            iotlb += p->region_offset;
2058
        } else {
2059
            iotlb += paddr;
2060
        }
2061
    }
2062

    
2063
    code_address = address;
2064
    /* Make accesses to pages with watchpoints go via the
2065
       watchpoint trap routines.  */
2066
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2067
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2068
            iotlb = io_mem_watch + paddr;
2069
            /* TODO: The memory case can be optimized by not trapping
2070
               reads of pages with a write breakpoint.  */
2071
            address |= TLB_MMIO;
2072
        }
2073
    }
2074

    
2075
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2076
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2077
    te = &env->tlb_table[mmu_idx][index];
2078
    te->addend = addend - vaddr;
2079
    if (prot & PAGE_READ) {
2080
        te->addr_read = address;
2081
    } else {
2082
        te->addr_read = -1;
2083
    }
2084

    
2085
    if (prot & PAGE_EXEC) {
2086
        te->addr_code = code_address;
2087
    } else {
2088
        te->addr_code = -1;
2089
    }
2090
    if (prot & PAGE_WRITE) {
2091
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2092
            (pd & IO_MEM_ROMD)) {
2093
            /* Write access calls the I/O callback.  */
2094
            te->addr_write = address | TLB_MMIO;
2095
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2096
                   !cpu_physical_memory_is_dirty(pd)) {
2097
            te->addr_write = address | TLB_NOTDIRTY;
2098
        } else {
2099
            te->addr_write = address;
2100
        }
2101
    } else {
2102
        te->addr_write = -1;
2103
    }
2104
    return ret;
2105
}
2106
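/* Editor's note (summary, not in the original source): the bits stored in
   addr_read/addr_write/addr_code above encode how an access must be
   handled: -1 means the access type is not allowed, TLB_MMIO forces the
   slow path through the registered I/O callbacks (also used for pages with
   watchpoints), and TLB_NOTDIRTY routes the first write to a clean RAM
   page through the notdirty handlers so that dirty tracking and
   self-modifying code detection keep working. */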

    
2107
#else
2108

    
2109
void tlb_flush(CPUState *env, int flush_global)
2110
{
2111
}
2112

    
2113
void tlb_flush_page(CPUState *env, target_ulong addr)
2114
{
2115
}
2116

    
2117
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2118
                      target_phys_addr_t paddr, int prot,
2119
                      int mmu_idx, int is_softmmu)
2120
{
2121
    return 0;
2122
}
2123

    
2124
/* dump memory mappings */
2125
void page_dump(FILE *f)
2126
{
2127
    unsigned long start, end;
2128
    int i, j, prot, prot1;
2129
    PageDesc *p;
2130

    
2131
    fprintf(f, "%-8s %-8s %-8s %s\n",
2132
            "start", "end", "size", "prot");
2133
    start = -1;
2134
    end = -1;
2135
    prot = 0;
2136
    for(i = 0; i <= L1_SIZE; i++) {
2137
        if (i < L1_SIZE)
2138
            p = l1_map[i];
2139
        else
2140
            p = NULL;
2141
        for(j = 0;j < L2_SIZE; j++) {
2142
            if (!p)
2143
                prot1 = 0;
2144
            else
2145
                prot1 = p[j].flags;
2146
            if (prot1 != prot) {
2147
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2148
                if (start != -1) {
2149
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2150
                            start, end, end - start,
2151
                            prot & PAGE_READ ? 'r' : '-',
2152
                            prot & PAGE_WRITE ? 'w' : '-',
2153
                            prot & PAGE_EXEC ? 'x' : '-');
2154
                }
2155
                if (prot1 != 0)
2156
                    start = end;
2157
                else
2158
                    start = -1;
2159
                prot = prot1;
2160
            }
2161
            if (!p)
2162
                break;
2163
        }
2164
    }
2165
}
2166
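/* Editor's note: page_dump() prints one line per run of pages with
   identical protection; an (illustrative) excerpt of the output looks like

       start    end      size     prot
       00400000-00495000 00095000 r-x
       00596000-005b1000 0001b000 rw-
*/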

    
2167
int page_get_flags(target_ulong address)
2168
{
2169
    PageDesc *p;
2170

    
2171
    p = page_find(address >> TARGET_PAGE_BITS);
2172
    if (!p)
2173
        return 0;
2174
    return p->flags;
2175
}
2176

    
2177
/* modify the flags of a page and invalidate the code if
2178
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2179
   depending on PAGE_WRITE */
2180
void page_set_flags(target_ulong start, target_ulong end, int flags)
2181
{
2182
    PageDesc *p;
2183
    target_ulong addr;
2184

    
2185
    /* mmap_lock should already be held.  */
2186
    start = start & TARGET_PAGE_MASK;
2187
    end = TARGET_PAGE_ALIGN(end);
2188
    if (flags & PAGE_WRITE)
2189
        flags |= PAGE_WRITE_ORG;
2190
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2191
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2192
        /* We may be called for host regions that are outside guest
2193
           address space.  */
2194
        if (!p)
2195
            return;
2196
        /* if the write protection is set, then we invalidate the code
2197
           inside */
2198
        if (!(p->flags & PAGE_WRITE) &&
2199
            (flags & PAGE_WRITE) &&
2200
            p->first_tb) {
2201
            tb_invalidate_phys_page(addr, 0, NULL);
2202
        }
2203
        p->flags = flags;
2204
    }
2205
}
2206

    
2207
int page_check_range(target_ulong start, target_ulong len, int flags)
2208
{
2209
    PageDesc *p;
2210
    target_ulong end;
2211
    target_ulong addr;
2212

    
2213
    if (start + len < start)
2214
        /* we've wrapped around */
2215
        return -1;
2216

    
2217
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2218
    start = start & TARGET_PAGE_MASK;
2219

    
2220
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2221
        p = page_find(addr >> TARGET_PAGE_BITS);
2222
        if (!p)
2223
            return -1;
2224
        if (!(p->flags & PAGE_VALID))
2225
            return -1;
2226

    
2227
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2228
            return -1;
2229
        if (flags & PAGE_WRITE) {
2230
            if (!(p->flags & PAGE_WRITE_ORG))
2231
                return -1;
2232
            /* unprotect the page if it was put read-only because it
2233
               contains translated code */
2234
            if (!(p->flags & PAGE_WRITE)) {
2235
                if (!page_unprotect(addr, 0, NULL))
2236
                    return -1;
2237
            }
2238
            return 0;
2239
        }
2240
    }
2241
    return 0;
2242
}
2243
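/* Illustrative usage (editor's annotation, names and error handling are
   placeholders): callers that are about to access guest memory on the
   guest's behalf can validate the range first, e.g.

       if (page_check_range(guest_addr, len, PAGE_READ) != 0)
           return -EFAULT;

   Requesting PAGE_WRITE additionally unprotects pages that were made
   read-only because they contain translated code. */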

    
2244
/* called from signal handler: invalidate the code and unprotect the
2245
   page. Return TRUE if the fault was successfully handled. */
2246
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2247
{
2248
    unsigned int page_index, prot, pindex;
2249
    PageDesc *p, *p1;
2250
    target_ulong host_start, host_end, addr;
2251

    
2252
    /* Technically this isn't safe inside a signal handler.  However we
2253
       know this only ever happens in a synchronous SEGV handler, so in
2254
       practice it seems to be ok.  */
2255
    mmap_lock();
2256

    
2257
    host_start = address & qemu_host_page_mask;
2258
    page_index = host_start >> TARGET_PAGE_BITS;
2259
    p1 = page_find(page_index);
2260
    if (!p1) {
2261
        mmap_unlock();
2262
        return 0;
2263
    }
2264
    host_end = host_start + qemu_host_page_size;
2265
    p = p1;
2266
    prot = 0;
2267
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2268
        prot |= p->flags;
2269
        p++;
2270
    }
2271
    /* if the page was really writable, then we change its
2272
       protection back to writable */
2273
    if (prot & PAGE_WRITE_ORG) {
2274
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2275
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2276
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2277
                     (prot & PAGE_BITS) | PAGE_WRITE);
2278
            p1[pindex].flags |= PAGE_WRITE;
2279
            /* and since the content will be modified, we must invalidate
2280
               the corresponding translated code. */
2281
            tb_invalidate_phys_page(address, pc, puc);
2282
#ifdef DEBUG_TB_CHECK
2283
            tb_invalidate_check(address);
2284
#endif
2285
            mmap_unlock();
2286
            return 1;
2287
        }
2288
    }
2289
    mmap_unlock();
2290
    return 0;
2291
}
2292

    
2293
static inline void tlb_set_dirty(CPUState *env,
2294
                                 unsigned long addr, target_ulong vaddr)
2295
{
2296
}
2297
#endif /* defined(CONFIG_USER_ONLY) */
2298

    
2299
#if !defined(CONFIG_USER_ONLY)
2300

    
2301
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2302
                             ram_addr_t memory, ram_addr_t region_offset);
2303
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2304
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2305
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2306
                      need_subpage)                                     \
2307
    do {                                                                \
2308
        if (addr > start_addr)                                          \
2309
            start_addr2 = 0;                                            \
2310
        else {                                                          \
2311
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2312
            if (start_addr2 > 0)                                        \
2313
                need_subpage = 1;                                       \
2314
        }                                                               \
2315
                                                                        \
2316
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2317
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2318
        else {                                                          \
2319
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2320
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2321
                need_subpage = 1;                                       \
2322
        }                                                               \
2323
    } while (0)
2324

    
2325
/* register physical memory. 'size' must be a multiple of the target
2326
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2327
   io memory page.  The address used when calling the IO function is
2328
   the offset from the start of the region, plus region_offset.  Both
2329
   start_addr and region_offset are rounded down to a page boundary
2330
   before calculating this offset.  This should not be a problem unless
2331
   the low bits of start_addr and region_offset differ.  */
2332
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2333
                                         ram_addr_t size,
2334
                                         ram_addr_t phys_offset,
2335
                                         ram_addr_t region_offset)
2336
{
2337
    target_phys_addr_t addr, end_addr;
2338
    PhysPageDesc *p;
2339
    CPUState *env;
2340
    ram_addr_t orig_size = size;
2341
    void *subpage;
2342

    
2343
#ifdef CONFIG_KQEMU
2344
    /* XXX: should not depend on cpu context */
2345
    env = first_cpu;
2346
    if (env->kqemu_enabled) {
2347
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2348
    }
2349
#endif
2350
    if (kvm_enabled())
2351
        kvm_set_phys_mem(start_addr, size, phys_offset);
2352

    
2353
    if (phys_offset == IO_MEM_UNASSIGNED) {
2354
        region_offset = start_addr;
2355
    }
2356
    region_offset &= TARGET_PAGE_MASK;
2357
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2358
    end_addr = start_addr + (target_phys_addr_t)size;
2359
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2360
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2361
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2362
            ram_addr_t orig_memory = p->phys_offset;
2363
            target_phys_addr_t start_addr2, end_addr2;
2364
            int need_subpage = 0;
2365

    
2366
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2367
                          need_subpage);
2368
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2369
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2370
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2371
                                           &p->phys_offset, orig_memory,
2372
                                           p->region_offset);
2373
                } else {
2374
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2375
                                            >> IO_MEM_SHIFT];
2376
                }
2377
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2378
                                 region_offset);
2379
                p->region_offset = 0;
2380
            } else {
2381
                p->phys_offset = phys_offset;
2382
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2383
                    (phys_offset & IO_MEM_ROMD))
2384
                    phys_offset += TARGET_PAGE_SIZE;
2385
            }
2386
        } else {
2387
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2388
            p->phys_offset = phys_offset;
2389
            p->region_offset = region_offset;
2390
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2391
                (phys_offset & IO_MEM_ROMD)) {
2392
                phys_offset += TARGET_PAGE_SIZE;
2393
            } else {
2394
                target_phys_addr_t start_addr2, end_addr2;
2395
                int need_subpage = 0;
2396

    
2397
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2398
                              end_addr2, need_subpage);
2399

    
2400
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2401
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2402
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2403
                                           addr & TARGET_PAGE_MASK);
2404
                    subpage_register(subpage, start_addr2, end_addr2,
2405
                                     phys_offset, region_offset);
2406
                    p->region_offset = 0;
2407
                }
2408
            }
2409
        }
2410
        region_offset += TARGET_PAGE_SIZE;
2411
    }
2412

    
2413
    /* since each CPU stores ram addresses in its TLB cache, we must
2414
       reset the modified entries */
2415
    /* XXX: slow ! */
2416
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2417
        tlb_flush(env, 1);
2418
    }
2419
}
2420
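/* Illustrative usage (editor's annotation, not part of the original source):
   given an I/O handler index "io" returned by cpu_register_io_memory(), a
   board model could map a 4 KiB MMIO region at a made-up guest physical
   address like this:

       cpu_register_physical_memory_offset(0x10000000, 0x1000, io, 0);

   With region_offset == 0 the callbacks see the offset from the start of
   the registered region, as described in the comment above. */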

    
2421
/* XXX: temporary until new memory mapping API */
2422
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2423
{
2424
    PhysPageDesc *p;
2425

    
2426
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2427
    if (!p)
2428
        return IO_MEM_UNASSIGNED;
2429
    return p->phys_offset;
2430
}
2431

    
2432
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2433
{
2434
    if (kvm_enabled())
2435
        kvm_coalesce_mmio_region(addr, size);
2436
}
2437

    
2438
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2439
{
2440
    if (kvm_enabled())
2441
        kvm_uncoalesce_mmio_region(addr, size);
2442
}
2443

    
2444
#ifdef CONFIG_KQEMU
2445
/* XXX: better than nothing */
2446
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2447
{
2448
    ram_addr_t addr;
2449
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2450
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2451
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2452
        abort();
2453
    }
2454
    addr = last_ram_offset;
2455
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2456
    return addr;
2457
}
2458
#endif
2459

    
2460
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2461
{
2462
    RAMBlock *new_block;
2463

    
2464
#ifdef CONFIG_KQEMU
2465
    if (kqemu_phys_ram_base) {
2466
        return kqemu_ram_alloc(size);
2467
    }
2468
#endif
2469

    
2470
    size = TARGET_PAGE_ALIGN(size);
2471
    new_block = qemu_malloc(sizeof(*new_block));
2472

    
2473
    new_block->host = qemu_vmalloc(size);
2474
    new_block->offset = last_ram_offset;
2475
    new_block->length = size;
2476

    
2477
    new_block->next = ram_blocks;
2478
    ram_blocks = new_block;
2479

    
2480
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2481
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2482
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2483
           0xff, size >> TARGET_PAGE_BITS);
2484

    
2485
    last_ram_offset += size;
2486

    
2487
    if (kvm_enabled())
2488
        kvm_setup_guest_memory(new_block->host, size);
2489

    
2490
    return new_block->offset;
2491
}
2492
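/* Illustrative usage (editor's annotation, "ram_size" is a placeholder):
   the usual pattern is to allocate backing storage first and then map it
   into the guest physical address space:

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory_offset(0, ram_size,
                                           ram_off | IO_MEM_RAM, 0);
*/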

    
2493
void qemu_ram_free(ram_addr_t addr)
2494
{
2495
    /* TODO: implement this.  */
2496
}
2497

    
2498
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2499
   With the exception of the softmmu code in this file, this should
2500
   only be used for local memory (e.g. video ram) that the device owns,
2501
   and knows it isn't going to access beyond the end of the block.
2502

2503
   It should not be used for general purpose DMA.
2504
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2505
 */
2506
void *qemu_get_ram_ptr(ram_addr_t addr)
2507
{
2508
    RAMBlock *prev;
2509
    RAMBlock **prevp;
2510
    RAMBlock *block;
2511

    
2512
#ifdef CONFIG_KQEMU
2513
    if (kqemu_phys_ram_base) {
2514
        return kqemu_phys_ram_base + addr;
2515
    }
2516
#endif
2517

    
2518
    prev = NULL;
2519
    prevp = &ram_blocks;
2520
    block = ram_blocks;
2521
    while (block && (block->offset > addr
2522
                     || block->offset + block->length <= addr)) {
2523
        if (prev)
2524
          prevp = &prev->next;
2525
        prev = block;
2526
        block = block->next;
2527
    }
2528
    if (!block) {
2529
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2530
        abort();
2531
    }
2532
    /* Move this entry to the start of the list.  */
2533
    if (prev) {
2534
        prev->next = block->next;
2535
        block->next = *prevp;
2536
        *prevp = block;
2537
    }
2538
    return block->host + (addr - block->offset);
2539
}
2540

    
2541
/* Some of the softmmu routines need to translate from a host pointer
2542
   (typically a TLB entry) back to a ram offset.  */
2543
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2544
{
2545
    RAMBlock *prev;
2546
    RAMBlock **prevp;
2547
    RAMBlock *block;
2548
    uint8_t *host = ptr;
2549

    
2550
#ifdef CONFIG_KQEMU
2551
    if (kqemu_phys_ram_base) {
2552
        return host - kqemu_phys_ram_base;
2553
    }
2554
#endif
2555

    
2556
    prev = NULL;
2557
    prevp = &ram_blocks;
2558
    block = ram_blocks;
2559
    while (block && (block->host > host
2560
                     || block->host + block->length <= host)) {
2561
        if (prev)
2562
          prevp = &prev->next;
2563
        prev = block;
2564
        block = block->next;
2565
    }
2566
    if (!block) {
2567
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2568
        abort();
2569
    }
2570
    return block->offset + (host - block->host);
2571
}
2572

    
2573
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2574
{
2575
#ifdef DEBUG_UNASSIGNED
2576
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2577
#endif
2578
#if defined(TARGET_SPARC)
2579
    do_unassigned_access(addr, 0, 0, 0, 1);
2580
#endif
2581
    return 0;
2582
}
2583

    
2584
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2585
{
2586
#ifdef DEBUG_UNASSIGNED
2587
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2588
#endif
2589
#if defined(TARGET_SPARC)
2590
    do_unassigned_access(addr, 0, 0, 0, 2);
2591
#endif
2592
    return 0;
2593
}
2594

    
2595
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2596
{
2597
#ifdef DEBUG_UNASSIGNED
2598
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2599
#endif
2600
#if defined(TARGET_SPARC)
2601
    do_unassigned_access(addr, 0, 0, 0, 4);
2602
#endif
2603
    return 0;
2604
}
2605

    
2606
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2607
{
2608
#ifdef DEBUG_UNASSIGNED
2609
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2610
#endif
2611
#if defined(TARGET_SPARC)
2612
    do_unassigned_access(addr, 1, 0, 0, 1);
2613
#endif
2614
}
2615

    
2616
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2617
{
2618
#ifdef DEBUG_UNASSIGNED
2619
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2620
#endif
2621
#if defined(TARGET_SPARC)
2622
    do_unassigned_access(addr, 1, 0, 0, 2);
2623
#endif
2624
}
2625

    
2626
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2627
{
2628
#ifdef DEBUG_UNASSIGNED
2629
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2630
#endif
2631
#if defined(TARGET_SPARC)
2632
    do_unassigned_access(addr, 1, 0, 0, 4);
2633
#endif
2634
}
2635

    
2636
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2637
    unassigned_mem_readb,
2638
    unassigned_mem_readw,
2639
    unassigned_mem_readl,
2640
};
2641

    
2642
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2643
    unassigned_mem_writeb,
2644
    unassigned_mem_writew,
2645
    unassigned_mem_writel,
2646
};
2647

    
2648
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2649
                                uint32_t val)
2650
{
2651
    int dirty_flags;
2652
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2653
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2654
#if !defined(CONFIG_USER_ONLY)
2655
        tb_invalidate_phys_page_fast(ram_addr, 1);
2656
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2657
#endif
2658
    }
2659
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2660
#ifdef CONFIG_KQEMU
2661
    if (cpu_single_env->kqemu_enabled &&
2662
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2663
        kqemu_modify_page(cpu_single_env, ram_addr);
2664
#endif
2665
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2666
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2667
    /* we remove the notdirty callback only if the code has been
2668
       flushed */
2669
    if (dirty_flags == 0xff)
2670
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2671
}
2672

    
2673
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2674
                                uint32_t val)
2675
{
2676
    int dirty_flags;
2677
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2678
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2679
#if !defined(CONFIG_USER_ONLY)
2680
        tb_invalidate_phys_page_fast(ram_addr, 2);
2681
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2682
#endif
2683
    }
2684
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2685
#ifdef CONFIG_KQEMU
2686
    if (cpu_single_env->kqemu_enabled &&
2687
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2688
        kqemu_modify_page(cpu_single_env, ram_addr);
2689
#endif
2690
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2691
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2692
    /* we remove the notdirty callback only if the code has been
2693
       flushed */
2694
    if (dirty_flags == 0xff)
2695
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2696
}
2697

    
2698
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2699
                                uint32_t val)
2700
{
2701
    int dirty_flags;
2702
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2703
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2704
#if !defined(CONFIG_USER_ONLY)
2705
        tb_invalidate_phys_page_fast(ram_addr, 4);
2706
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2707
#endif
2708
    }
2709
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2710
#ifdef CONFIG_KQEMU
2711
    if (cpu_single_env->kqemu_enabled &&
2712
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2713
        kqemu_modify_page(cpu_single_env, ram_addr);
2714
#endif
2715
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2716
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2717
    /* we remove the notdirty callback only if the code has been
2718
       flushed */
2719
    if (dirty_flags == 0xff)
2720
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2721
}
2722

    
2723
static CPUReadMemoryFunc *error_mem_read[3] = {
2724
    NULL, /* never used */
2725
    NULL, /* never used */
2726
    NULL, /* never used */
2727
};
2728

    
2729
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2730
    notdirty_mem_writeb,
2731
    notdirty_mem_writew,
2732
    notdirty_mem_writel,
2733
};
2734

    
2735
/* Generate a debug exception if a watchpoint has been hit.  */
2736
static void check_watchpoint(int offset, int len_mask, int flags)
2737
{
2738
    CPUState *env = cpu_single_env;
2739
    target_ulong pc, cs_base;
2740
    TranslationBlock *tb;
2741
    target_ulong vaddr;
2742
    CPUWatchpoint *wp;
2743
    int cpu_flags;
2744

    
2745
    if (env->watchpoint_hit) {
2746
        /* We re-entered the check after replacing the TB. Now raise
2747
         * the debug interrupt so that it will trigger after the
2748
         * current instruction. */
2749
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2750
        return;
2751
    }
2752
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2753
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2754
        if ((vaddr == (wp->vaddr & len_mask) ||
2755
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2756
            wp->flags |= BP_WATCHPOINT_HIT;
2757
            if (!env->watchpoint_hit) {
2758
                env->watchpoint_hit = wp;
2759
                tb = tb_find_pc(env->mem_io_pc);
2760
                if (!tb) {
2761
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2762
                              "pc=%p", (void *)env->mem_io_pc);
2763
                }
2764
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2765
                tb_phys_invalidate(tb, -1);
2766
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2767
                    env->exception_index = EXCP_DEBUG;
2768
                } else {
2769
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2770
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2771
                }
2772
                cpu_resume_from_signal(env, NULL);
2773
            }
2774
        } else {
2775
            wp->flags &= ~BP_WATCHPOINT_HIT;
2776
        }
2777
    }
2778
}
2779

    
2780
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2781
   so these check for a hit then pass through to the normal out-of-line
2782
   phys routines.  */
2783
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2784
{
2785
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2786
    return ldub_phys(addr);
2787
}
2788

    
2789
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2790
{
2791
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2792
    return lduw_phys(addr);
2793
}
2794

    
2795
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2796
{
2797
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2798
    return ldl_phys(addr);
2799
}
2800

    
2801
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2802
                             uint32_t val)
2803
{
2804
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2805
    stb_phys(addr, val);
2806
}
2807

    
2808
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2809
                             uint32_t val)
2810
{
2811
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2812
    stw_phys(addr, val);
2813
}
2814

    
2815
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2816
                             uint32_t val)
2817
{
2818
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2819
    stl_phys(addr, val);
2820
}
2821

    
2822
static CPUReadMemoryFunc *watch_mem_read[3] = {
2823
    watch_mem_readb,
2824
    watch_mem_readw,
2825
    watch_mem_readl,
2826
};
2827

    
2828
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2829
    watch_mem_writeb,
2830
    watch_mem_writew,
2831
    watch_mem_writel,
2832
};
2833

    
2834
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2835
                                 unsigned int len)
2836
{
2837
    uint32_t ret;
2838
    unsigned int idx;
2839

    
2840
    idx = SUBPAGE_IDX(addr);
2841
#if defined(DEBUG_SUBPAGE)
2842
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2843
           mmio, len, addr, idx);
2844
#endif
2845
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2846
                                       addr + mmio->region_offset[idx][0][len]);
2847

    
2848
    return ret;
2849
}
2850

    
2851
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2852
                              uint32_t value, unsigned int len)
2853
{
2854
    unsigned int idx;
2855

    
2856
    idx = SUBPAGE_IDX(addr);
2857
#if defined(DEBUG_SUBPAGE)
2858
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2859
           mmio, len, addr, idx, value);
2860
#endif
2861
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2862
                                  addr + mmio->region_offset[idx][1][len],
2863
                                  value);
2864
}
2865

    
2866
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2867
{
2868
#if defined(DEBUG_SUBPAGE)
2869
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2870
#endif
2871

    
2872
    return subpage_readlen(opaque, addr, 0);
2873
}
2874

    
2875
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2876
                            uint32_t value)
2877
{
2878
#if defined(DEBUG_SUBPAGE)
2879
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2880
#endif
2881
    subpage_writelen(opaque, addr, value, 0);
2882
}
2883

    
2884
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2885
{
2886
#if defined(DEBUG_SUBPAGE)
2887
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2888
#endif
2889

    
2890
    return subpage_readlen(opaque, addr, 1);
2891
}
2892

    
2893
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2894
                            uint32_t value)
2895
{
2896
#if defined(DEBUG_SUBPAGE)
2897
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2898
#endif
2899
    subpage_writelen(opaque, addr, value, 1);
2900
}
2901

    
2902
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2903
{
2904
#if defined(DEBUG_SUBPAGE)
2905
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2906
#endif
2907

    
2908
    return subpage_readlen(opaque, addr, 2);
2909
}
2910

    
2911
static void subpage_writel (void *opaque,
2912
                         target_phys_addr_t addr, uint32_t value)
2913
{
2914
#if defined(DEBUG_SUBPAGE)
2915
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2916
#endif
2917
    subpage_writelen(opaque, addr, value, 2);
2918
}
2919

    
2920
static CPUReadMemoryFunc *subpage_read[] = {
2921
    &subpage_readb,
2922
    &subpage_readw,
2923
    &subpage_readl,
2924
};
2925

    
2926
static CPUWriteMemoryFunc *subpage_write[] = {
2927
    &subpage_writeb,
2928
    &subpage_writew,
2929
    &subpage_writel,
2930
};
2931

    
2932
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2933
                             ram_addr_t memory, ram_addr_t region_offset)
2934
{
2935
    int idx, eidx;
2936
    unsigned int i;
2937

    
2938
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2939
        return -1;
2940
    idx = SUBPAGE_IDX(start);
2941
    eidx = SUBPAGE_IDX(end);
2942
#if defined(DEBUG_SUBPAGE)
2943
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2944
           mmio, start, end, idx, eidx, memory);
2945
#endif
2946
    memory >>= IO_MEM_SHIFT;
2947
    for (; idx <= eidx; idx++) {
2948
        for (i = 0; i < 4; i++) {
2949
            if (io_mem_read[memory][i]) {
2950
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2951
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2952
                mmio->region_offset[idx][0][i] = region_offset;
2953
            }
2954
            if (io_mem_write[memory][i]) {
2955
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2956
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2957
                mmio->region_offset[idx][1][i] = region_offset;
2958
            }
2959
        }
2960
    }
2961

    
2962
    return 0;
2963
}
2964

    
2965
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2966
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2967
{
2968
    subpage_t *mmio;
2969
    int subpage_memory;
2970

    
2971
    mmio = qemu_mallocz(sizeof(subpage_t));
2972

    
2973
    mmio->base = base;
2974
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2975
#if defined(DEBUG_SUBPAGE)
2976
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2977
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2978
#endif
2979
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2980
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2981
                         region_offset);
2982

    
2983
    return mmio;
2984
}
2985

    
2986
static int get_free_io_mem_idx(void)
2987
{
2988
    int i;
2989

    
2990
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2991
        if (!io_mem_used[i]) {
2992
            io_mem_used[i] = 1;
2993
            return i;
2994
        }
2995

    
2996
    return -1;
2997
}
2998

    
2999
static void io_mem_init(void)
3000
{
3001
    int i;
3002

    
3003
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3004
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3005
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3006
    for (i=0; i<5; i++)
3007
        io_mem_used[i] = 1;
3008

    
3009
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3010
                                          watch_mem_write, NULL);
3011
#ifdef CONFIG_KQEMU
3012
    if (kqemu_phys_ram_base) {
3013
        /* alloc dirty bits array */
3014
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3015
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3016
    }
3017
#endif
3018
}
3019

    
3020
/* mem_read and mem_write are arrays of functions containing the
3021
   function to access byte (index 0), word (index 1) and dword (index
3022
   2). Functions can be omitted with a NULL function pointer.
3023
   If io_index is non zero, the corresponding io zone is
3024
   modified. If it is zero, a new io zone is allocated. The return
3025
   value can be used with cpu_register_physical_memory(). (-1) is
3026
   returned on error. */
3027
int cpu_register_io_memory(int io_index,
3028
                           CPUReadMemoryFunc **mem_read,
3029
                           CPUWriteMemoryFunc **mem_write,
3030
                           void *opaque)
3031
{
3032
    int i, subwidth = 0;
3033

    
3034
    if (io_index <= 0) {
3035
        io_index = get_free_io_mem_idx();
3036
        if (io_index == -1)
3037
            return io_index;
3038
    } else {
3039
        if (io_index >= IO_MEM_NB_ENTRIES)
3040
            return -1;
3041
    }
3042

    
3043
    for(i = 0;i < 3; i++) {
3044
        if (!mem_read[i] || !mem_write[i])
3045
            subwidth = IO_MEM_SUBWIDTH;
3046
        io_mem_read[io_index][i] = mem_read[i];
3047
        io_mem_write[io_index][i] = mem_write[i];
3048
    }
3049
    io_mem_opaque[io_index] = opaque;
3050
    return (io_index << IO_MEM_SHIFT) | subwidth;
3051
}
3052
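/* Illustrative usage (editor's annotation, not part of the original source;
   all lower-case names below are placeholders for a device model's own
   symbols):

       static CPUReadMemoryFunc *foo_read[3] = {
           foo_readb, foo_readw, foo_readl,
       };
       static CPUWriteMemoryFunc *foo_write[3] = {
           foo_writeb, foo_writew, foo_writel,
       };

       int io = cpu_register_io_memory(0, foo_read, foo_write, foo_state);
       cpu_register_physical_memory_offset(base, 0x1000, io, 0);

   Passing io_index == 0 allocates a free slot; leaving an accessor NULL
   marks the region IO_MEM_SUBWIDTH so that it is routed through the
   subpage machinery. */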

    
3053
void cpu_unregister_io_memory(int io_table_address)
3054
{
3055
    int i;
3056
    int io_index = io_table_address >> IO_MEM_SHIFT;
3057

    
3058
    for (i=0;i < 3; i++) {
3059
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3060
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3061
    }
3062
    io_mem_opaque[io_index] = NULL;
3063
    io_mem_used[io_index] = 0;
3064
}
3065

    
3066
#endif /* !defined(CONFIG_USER_ONLY) */
3067

    
3068
/* physical memory access (slow version, mainly for debug) */
3069
#if defined(CONFIG_USER_ONLY)
3070
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3071
                            int len, int is_write)
3072
{
3073
    int l, flags;
3074
    target_ulong page;
3075
    void * p;
3076

    
3077
    while (len > 0) {
3078
        page = addr & TARGET_PAGE_MASK;
3079
        l = (page + TARGET_PAGE_SIZE) - addr;
3080
        if (l > len)
3081
            l = len;
3082
        flags = page_get_flags(page);
3083
        if (!(flags & PAGE_VALID))
3084
            return;
3085
        if (is_write) {
3086
            if (!(flags & PAGE_WRITE))
3087
                return;
3088
            /* XXX: this code should not depend on lock_user */
3089
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3090
                /* FIXME - should this return an error rather than just fail? */
3091
                return;
3092
            memcpy(p, buf, l);
3093
            unlock_user(p, addr, l);
3094
        } else {
3095
            if (!(flags & PAGE_READ))
3096
                return;
3097
            /* XXX: this code should not depend on lock_user */
3098
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3099
                /* FIXME - should this return an error rather than just fail? */
3100
                return;
3101
            memcpy(buf, p, l);
3102
            unlock_user(p, addr, 0);
3103
        }
3104
        len -= l;
3105
        buf += l;
3106
        addr += l;
3107
    }
3108
}
3109

    
3110
#else
3111
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

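/* Illustrative sketch (not compiled): how a device model might use
   cpu_physical_memory_rw() for a simple copy-style DMA.  The helper name
   and chunking strategy below are hypothetical, not part of this file. */
#if 0
static void example_dma_copy(target_phys_addr_t src, target_phys_addr_t dst,
                             int size)
{
    uint8_t tmp[256];

    while (size > 0) {
        int chunk = size < (int)sizeof(tmp) ? size : (int)sizeof(tmp);
        /* read from guest physical memory (is_write = 0) ... */
        cpu_physical_memory_rw(src, tmp, chunk, 0);
        /* ... and write it back at the destination (is_write = 1) */
        cpu_physical_memory_rw(dst, tmp, chunk, 1);
        src += chunk;
        dst += chunk;
        size -= chunk;
    }
}
#endif
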
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

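/* Illustrative sketch (not compiled): unlike cpu_physical_memory_rw(),
   the helper above also patches ROM pages, so board code typically uses
   it when copying a firmware image into its ROM region.  The function
   name and arguments below are hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *image, int image_size,
                                  target_phys_addr_t rom_base)
{
    /* a plain cpu_physical_memory_write() would silently skip ROM pages */
    cpu_physical_memory_write_rom(rom_base, image, image_size);
}
#endif
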
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

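/* Illustrative sketch (not compiled): the zero-copy pattern the two
   functions above are meant for.  Map the guest buffer, let the device
   (here just memset) touch it through the host pointer, then unmap with
   the length actually written.  If the mapping cannot be obtained (for
   example because the single bounce buffer is busy), a map client can be
   registered so the caller is notified when a retry is worthwhile.  The
   helper and callback names below are hypothetical. */
#if 0
static void example_retry_cb(void *opaque)
{
    /* called from cpu_notify_map_clients() once resources are free again */
}

static int example_fill_guest_buffer(target_phys_addr_t addr,
                                     target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        cpu_register_map_client(NULL, example_retry_cb);
        return -1;               /* try again after the callback fires */
    }
    memset(host, 0, plen);       /* may cover only part of the request */
    cpu_physical_memory_unmap(host, plen, 1, plen);
    return plen == size ? 0 : -1;
}
#endif
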
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The RAM page is not marked as dirty
   and the code inside is not invalidated. This is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

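/* Illustrative sketch (not compiled): the typical caller of
   stl_phys_notdirty() is MMU helper code that sets accessed/dirty bits in
   a guest PTE.  The _notdirty variant avoids marking the page dirty (and
   invalidating TBs) for a change the guest did not make explicitly.
   PTE_ACCESSED and the helper name are hypothetical. */
#if 0
#define PTE_ACCESSED 0x20

static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}
#endif
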
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

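/* Illustrative sketch (not compiled): cpu_memory_rw_debug() is the entry
   point the gdb stub and monitor use to access guest *virtual* memory,
   walking the guest page tables via cpu_get_phys_page_debug().  The dump
   helper below is hypothetical; the value printed is in guest byte
   order. */
#if 0
static void example_dump_guest_word(CPUState *env, target_ulong vaddr)
{
    uint32_t word;

    if (cpu_memory_rw_debug(env, vaddr, (uint8_t *)&word, sizeof(word), 0) < 0) {
        printf("no mapping for " TARGET_FMT_lx "\n", vaddr);
        return;
    }
    printf(TARGET_FMT_lx ": 0x%08x\n", vaddr, word);
}
#endif
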
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif