
root / exec.c @ 151f7749


/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
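/* Example of the resulting split: with 4 KB target pages
   (TARGET_PAGE_BITS == 12) and L2_BITS == 10, the non-alpha case gives
   L1_BITS == 32 - 10 - 12 == 10, i.e. a 1024-entry l1_map whose slots each
   point to a 1024-entry PageDesc array.  A page index is then decomposed
   as (index >> L2_BITS) for the L1 slot and (index & (L2_SIZE - 1)) for
   the L2 slot, exactly as page_l1_map() and page_find_alloc() do below. */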

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;
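/* SUBPAGE_IDX keeps only the offset-within-page bits: with 4 KB pages an
   access at base + 0x123, for example, dispatches through slot 0x123 of
   the tables above.  The trailing [4] selects the handler for the access
   width and the [2] in opaque/region_offset separates the read and write
   sides. */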

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
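/* The POSIX variant rounds the requested range out to whole host pages
   before changing protections: with a 4096-byte page, addr = 0x1234 and
   size = 0x100 become start = 0x1000 and end = 0x2000, so mprotect() is
   always handed page-aligned boundaries as it requires. */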

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}
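/* For user-mode emulation the loop above walks /proc/self/maps and marks
   every range the host process already occupies as PAGE_RESERVED, so that
   guest mmap allocations are steered away from memory QEMU itself (or the
   host libc) is already using. */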

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++) {
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}
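/* A physical page index is decoded much like the virtual case: the
   optional top level (only when TARGET_PHYS_ADDR_SPACE_BITS > 32) consumes
   the bits above L1_BITS + L2_BITS, the next lookup consumes L1_BITS, and
   the final (index & (L2_SIZE - 1)) selects one PhysPageDesc inside a leaf
   whose entries start out as IO_MEM_UNASSIGNED. */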

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
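/* Sizing recap for the dynamic case: the buffer defaults to ram_size / 4
   (32 MB under CONFIG_USER_ONLY), is clamped per host above, and
   code_gen_buffer_max_size keeps one maximally sized block of headroom so
   a translation started near the end of the buffer cannot overrun it. */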

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
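/* Restoring the jump to tc_ptr + tb_next_offset[n] points it back at its
   original target inside the TB's own generated code (the exit path to the
   CPU loop), so the block no longer chains directly into a possibly stale
   successor TB. */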

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
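/* Worked example: set_bits(tab, 5, 7) marks bits 5..11.  start and end
   land in different bytes, so the first byte is OR'ed with 0xe0 (bits
   5-7), no full 0xff bytes are needed, and the final byte is OR'ed with
   0x0f (bits 8-11). */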

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}
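/* A block whose guest code crosses a page boundary ends up registered on
   two pages: page_addr[0] from phys_pc and page_addr[1] from phys_page2,
   so invalidating either page will find and retire the TB. */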

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
          }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
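/* This works because tbs[] is filled in generation order, so tc_ptr values
   increase monotonically across the array; when no exact match is found,
   &tbs[m_max] is the last block whose code starts at or below tc_ptr,
   i.e. the one containing it. */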

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
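/* Typical call, for illustration: a debug stub watching a 4-byte variable
   at 'addr' for writes might use
       cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, NULL);
   the address must be 4-byte aligned here, and the tlb_flush_page() above
   makes later accesses to that page take the slow path where the
   watchpoint list is checked. */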

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        resetlock(&interrupt_lock);
    }
#endif
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if(cmp1(p,p1-p,"all")) {
                for(item = cpu_log_items; item->mask != 0; item++) {
                        mask |= item->mask;
                }
        } else {
        for(item = cpu_log_items; item->mask != 0; item++) {
            if (cmp1(p, p1 - p, item->name))
                goto found;
        }
        return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
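/* Example: cpu_str_to_log_mask("exec,int") returns
   CPU_LOG_EXEC | CPU_LOG_INT, "all" ORs together every entry of
   cpu_log_items, and an unknown name anywhere in the list makes the whole
   call return 0. */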

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    TAILQ_INIT(&env->breakpoints);
    TAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#endif
#if (NB_MMU_MODES >= 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#if (NB_MMU_MODES >= 5)
        env->tlb_table[4][i].addr_read = -1;
        env->tlb_table[4][i].addr_write = -1;
        env->tlb_table[4][i].addr_code = -1;
#endif
1770

    
1771
    }
1772

    
1773
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1774

    
1775
#ifdef CONFIG_KQEMU
1776
    if (env->kqemu_enabled) {
1777
        kqemu_flush(env, flush_global);
1778
    }
1779
#endif
1780
    tlb_flush_count++;
1781
}
1782

    
1783
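/* Invalidate a single TLB entry if any of its read/write/code addresses
   matches the given page-aligned virtual address. */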
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1784
{
1785
    if (addr == (tlb_entry->addr_read &
1786
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1787
        addr == (tlb_entry->addr_write &
1788
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1789
        addr == (tlb_entry->addr_code &
1790
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1791
        tlb_entry->addr_read = -1;
1792
        tlb_entry->addr_write = -1;
1793
        tlb_entry->addr_code = -1;
1794
    }
1795
}
1796

    
1797
void tlb_flush_page(CPUState *env, target_ulong addr)
1798
{
1799
    int i;
1800

    
1801
#if defined(DEBUG_TLB)
1802
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1803
#endif
1804
    /* must reset current TB so that interrupts cannot modify the
1805
       links while we are modifying them */
1806
    env->current_tb = NULL;
1807

    
1808
    addr &= TARGET_PAGE_MASK;
1809
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1810
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1811
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1812
#if (NB_MMU_MODES >= 3)
1813
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1814
#endif
1815
#if (NB_MMU_MODES >= 4)
1816
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1817
#endif
1818
#if (NB_MMU_MODES >= 5)
1819
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1820
#endif
1821

    
1822
    tlb_flush_jmp_cache(env, addr);
1823

    
1824
#ifdef CONFIG_KQEMU
1825
    if (env->kqemu_enabled) {
1826
        kqemu_flush_page(env, addr);
1827
    }
1828
#endif
1829
}
1830

    
1831
/* update the TLBs so that writes to code in the virtual page 'addr'
1832
   can be detected */
1833
static void tlb_protect_code(ram_addr_t ram_addr)
1834
{
1835
    cpu_physical_memory_reset_dirty(ram_addr,
1836
                                    ram_addr + TARGET_PAGE_SIZE,
1837
                                    CODE_DIRTY_FLAG);
1838
}
1839

    
1840
/* update the TLB so that writes in the physical page 'ram_addr' are no longer
   tested for self-modifying code */
1842
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1843
                                    target_ulong vaddr)
1844
{
1845
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1846
}
1847

    
1848
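/* If this TLB entry maps RAM inside [start, start + length), re-arm the
   TLB_NOTDIRTY trap so the next write goes through the not-dirty handlers. */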
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1849
                                         unsigned long start, unsigned long length)
1850
{
1851
    unsigned long addr;
1852
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1853
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1854
        if ((addr - start) < length) {
1855
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1856
        }
1857
    }
1858
}
1859

    
1860
/* Note: start and end must be within the same ram block.  */
1861
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1862
                                     int dirty_flags)
1863
{
1864
    CPUState *env;
1865
    unsigned long length, start1;
1866
    int i, mask, len;
1867
    uint8_t *p;
1868

    
1869
    start &= TARGET_PAGE_MASK;
1870
    end = TARGET_PAGE_ALIGN(end);
1871

    
1872
    length = end - start;
1873
    if (length == 0)
1874
        return;
1875
    len = length >> TARGET_PAGE_BITS;
1876
#ifdef CONFIG_KQEMU
1877
    /* XXX: should not depend on cpu context */
1878
    env = first_cpu;
1879
    if (env->kqemu_enabled) {
1880
        ram_addr_t addr;
1881
        addr = start;
1882
        for(i = 0; i < len; i++) {
1883
            kqemu_set_notdirty(env, addr);
1884
            addr += TARGET_PAGE_SIZE;
1885
        }
1886
    }
1887
#endif
1888
    mask = ~dirty_flags;
1889
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1890
    for(i = 0; i < len; i++)
1891
        p[i] &= mask;
1892

    
1893
    /* we modify the TLB cache so that the dirty bit will be set again
1894
       when accessing the range */
1895
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1896
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1898
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1899
            != (end - 1) - start) {
1900
        abort();
1901
    }
1902

    
1903
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1904
        for(i = 0; i < CPU_TLB_SIZE; i++)
1905
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1906
        for(i = 0; i < CPU_TLB_SIZE; i++)
1907
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1908
#if (NB_MMU_MODES >= 3)
1909
        for(i = 0; i < CPU_TLB_SIZE; i++)
1910
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1911
#endif
1912
#if (NB_MMU_MODES >= 4)
1913
        for(i = 0; i < CPU_TLB_SIZE; i++)
1914
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1915
#endif
1916
#if (NB_MMU_MODES >= 5)
1917
        for(i = 0; i < CPU_TLB_SIZE; i++)
1918
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1919
#endif
1920
    }
1921
}
1922

    
1923
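/* Turn dirty-page tracking on or off; the current implementation only
   records the flag in in_migration. */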
int cpu_physical_memory_set_dirty_tracking(int enable)
1924
{
1925
    in_migration = enable;
1926
    return 0;
1927
}
1928

    
1929
int cpu_physical_memory_get_dirty_tracking(void)
1930
{
1931
    return in_migration;
1932
}
1933

    
1934
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1935
                                   target_phys_addr_t end_addr)
1936
{
1937
    int ret = 0;
1938

    
1939
    if (kvm_enabled())
1940
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1941
    return ret;
1942
}
1943

    
1944
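/* Re-arm the TLB_NOTDIRTY trap for a RAM entry whose page is no longer
   marked dirty, so future writes update the dirty bitmap again. */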
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1945
{
1946
    ram_addr_t ram_addr;
1947
    void *p;
1948

    
1949
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1950
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1951
            + tlb_entry->addend);
1952
        ram_addr = qemu_ram_addr_from_host(p);
1953
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1954
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1955
        }
1956
    }
1957
}
1958

    
1959
/* update the TLB according to the current state of the dirty bits */
1960
void cpu_tlb_update_dirty(CPUState *env)
1961
{
1962
    int i;
1963
    for(i = 0; i < CPU_TLB_SIZE; i++)
1964
        tlb_update_dirty(&env->tlb_table[0][i]);
1965
    for(i = 0; i < CPU_TLB_SIZE; i++)
1966
        tlb_update_dirty(&env->tlb_table[1][i]);
1967
#if (NB_MMU_MODES >= 3)
1968
    for(i = 0; i < CPU_TLB_SIZE; i++)
1969
        tlb_update_dirty(&env->tlb_table[2][i]);
1970
#endif
1971
#if (NB_MMU_MODES >= 4)
1972
    for(i = 0; i < CPU_TLB_SIZE; i++)
1973
        tlb_update_dirty(&env->tlb_table[3][i]);
1974
#endif
1975
#if (NB_MMU_MODES >= 5)
1976
    for(i = 0; i < CPU_TLB_SIZE; i++)
1977
        tlb_update_dirty(&env->tlb_table[4][i]);
1978
#endif
1979
}
1980

    
1981
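/* Restore direct write access for a TLB entry that had the not-dirty trap
   armed for this virtual address. */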
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1982
{
1983
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1984
        tlb_entry->addr_write = vaddr;
1985
}
1986

    
1987
/* update the TLB corresponding to virtual page vaddr
1988
   so that it is no longer dirty */
1989
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1990
{
1991
    int i;
1992

    
1993
    vaddr &= TARGET_PAGE_MASK;
1994
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1995
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1996
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1997
#if (NB_MMU_MODES >= 3)
1998
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1999
#endif
2000
#if (NB_MMU_MODES >= 4)
2001
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2002
#endif
2003
#if (NB_MMU_MODES >= 5)
2004
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2005
#endif
2006
}
2007

    
2008
/* add a new TLB entry. At most one entry for a given virtual address
2009
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2010
   (can only happen in non SOFTMMU mode for I/O pages or pages
2011
   conflicting with the host address space). */
2012
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2013
                      target_phys_addr_t paddr, int prot,
2014
                      int mmu_idx, int is_softmmu)
2015
{
2016
    PhysPageDesc *p;
2017
    unsigned long pd;
2018
    unsigned int index;
2019
    target_ulong address;
2020
    target_ulong code_address;
2021
    target_phys_addr_t addend;
2022
    int ret;
2023
    CPUTLBEntry *te;
2024
    CPUWatchpoint *wp;
2025
    target_phys_addr_t iotlb;
2026

    
2027
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2028
    if (!p) {
2029
        pd = IO_MEM_UNASSIGNED;
2030
    } else {
2031
        pd = p->phys_offset;
2032
    }
2033
#if defined(DEBUG_TLB)
2034
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2035
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2036
#endif
2037

    
2038
    ret = 0;
2039
    address = vaddr;
2040
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2041
        /* IO memory case (romd handled later) */
2042
        address |= TLB_MMIO;
2043
    }
2044
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2045
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2046
        /* Normal RAM.  */
2047
        iotlb = pd & TARGET_PAGE_MASK;
2048
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2049
            iotlb |= IO_MEM_NOTDIRTY;
2050
        else
2051
            iotlb |= IO_MEM_ROM;
2052
    } else {
2053
        /* IO handlers are currently passed a physical address.
2054
           It would be nice to pass an offset from the base address
2055
           of that region.  This would avoid having to special case RAM,
2056
           and avoid full address decoding in every device.
2057
           We can't use the high bits of pd for this because
2058
           IO_MEM_ROMD uses these as a ram address.  */
2059
        iotlb = (pd & ~TARGET_PAGE_MASK);
2060
        if (p) {
2061
            iotlb += p->region_offset;
2062
        } else {
2063
            iotlb += paddr;
2064
        }
2065
    }
2066

    
2067
    code_address = address;
2068
    /* Make accesses to pages with watchpoints go via the
2069
       watchpoint trap routines.  */
2070
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2071
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2072
            iotlb = io_mem_watch + paddr;
2073
            /* TODO: The memory case can be optimized by not trapping
2074
               reads of pages with a write breakpoint.  */
2075
            address |= TLB_MMIO;
2076
        }
2077
    }
2078

    
2079
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2080
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2081
    te = &env->tlb_table[mmu_idx][index];
2082
    te->addend = addend - vaddr;
2083
    if (prot & PAGE_READ) {
2084
        te->addr_read = address;
2085
    } else {
2086
        te->addr_read = -1;
2087
    }
2088

    
2089
    if (prot & PAGE_EXEC) {
2090
        te->addr_code = code_address;
2091
    } else {
2092
        te->addr_code = -1;
2093
    }
2094
    if (prot & PAGE_WRITE) {
2095
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2096
            (pd & IO_MEM_ROMD)) {
2097
            /* Write access calls the I/O callback.  */
2098
            te->addr_write = address | TLB_MMIO;
2099
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2100
                   !cpu_physical_memory_is_dirty(pd)) {
2101
            te->addr_write = address | TLB_NOTDIRTY;
2102
        } else {
2103
            te->addr_write = address;
2104
        }
2105
    } else {
2106
        te->addr_write = -1;
2107
    }
2108
    return ret;
2109
}
2110

    
2111
#else
2112

    
2113
void tlb_flush(CPUState *env, int flush_global)
2114
{
2115
}
2116

    
2117
void tlb_flush_page(CPUState *env, target_ulong addr)
2118
{
2119
}
2120

    
2121
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2122
                      target_phys_addr_t paddr, int prot,
2123
                      int mmu_idx, int is_softmmu)
2124
{
2125
    return 0;
2126
}
2127

    
2128
/* dump memory mappings */
2129
void page_dump(FILE *f)
2130
{
2131
    unsigned long start, end;
2132
    int i, j, prot, prot1;
2133
    PageDesc *p;
2134

    
2135
    fprintf(f, "%-8s %-8s %-8s %s\n",
2136
            "start", "end", "size", "prot");
2137
    start = -1;
2138
    end = -1;
2139
    prot = 0;
2140
    for(i = 0; i <= L1_SIZE; i++) {
2141
        if (i < L1_SIZE)
2142
            p = l1_map[i];
2143
        else
2144
            p = NULL;
2145
        for(j = 0;j < L2_SIZE; j++) {
2146
            if (!p)
2147
                prot1 = 0;
2148
            else
2149
                prot1 = p[j].flags;
2150
            if (prot1 != prot) {
2151
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2152
                if (start != -1) {
2153
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2154
                            start, end, end - start,
2155
                            prot & PAGE_READ ? 'r' : '-',
2156
                            prot & PAGE_WRITE ? 'w' : '-',
2157
                            prot & PAGE_EXEC ? 'x' : '-');
2158
                }
2159
                if (prot1 != 0)
2160
                    start = end;
2161
                else
2162
                    start = -1;
2163
                prot = prot1;
2164
            }
2165
            if (!p)
2166
                break;
2167
        }
2168
    }
2169
}
2170

    
2171
int page_get_flags(target_ulong address)
2172
{
2173
    PageDesc *p;
2174

    
2175
    p = page_find(address >> TARGET_PAGE_BITS);
2176
    if (!p)
2177
        return 0;
2178
    return p->flags;
2179
}
2180

    
2181
/* modify the flags of a page and invalidate the code if
2182
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2183
   depending on PAGE_WRITE */
2184
void page_set_flags(target_ulong start, target_ulong end, int flags)
2185
{
2186
    PageDesc *p;
2187
    target_ulong addr;
2188

    
2189
    /* mmap_lock should already be held.  */
2190
    start = start & TARGET_PAGE_MASK;
2191
    end = TARGET_PAGE_ALIGN(end);
2192
    if (flags & PAGE_WRITE)
2193
        flags |= PAGE_WRITE_ORG;
2194
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2195
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2196
        /* We may be called for host regions that are outside guest
2197
           address space.  */
2198
        if (!p)
2199
            return;
2200
        /* if the write protection is set, then we invalidate the code
2201
           inside */
2202
        if (!(p->flags & PAGE_WRITE) &&
2203
            (flags & PAGE_WRITE) &&
2204
            p->first_tb) {
2205
            tb_invalidate_phys_page(addr, 0, NULL);
2206
        }
2207
        p->flags = flags;
2208
    }
2209
}
2210

    
2211
int page_check_range(target_ulong start, target_ulong len, int flags)
2212
{
2213
    PageDesc *p;
2214
    target_ulong end;
2215
    target_ulong addr;
2216

    
2217
    if (start + len < start)
2218
        /* we've wrapped around */
2219
        return -1;
2220

    
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
2223

    
2224
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2225
        p = page_find(addr >> TARGET_PAGE_BITS);
2226
        if( !p )
2227
            return -1;
2228
        if( !(p->flags & PAGE_VALID) )
2229
            return -1;
2230

    
2231
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2232
            return -1;
2233
        if (flags & PAGE_WRITE) {
2234
            if (!(p->flags & PAGE_WRITE_ORG))
2235
                return -1;
2236
            /* unprotect the page if it was put read-only because it
2237
               contains translated code */
2238
            if (!(p->flags & PAGE_WRITE)) {
2239
                if (!page_unprotect(addr, 0, NULL))
2240
                    return -1;
2241
            }
2242
            return 0;
2243
        }
2244
    }
2245
    return 0;
2246
}
2247

    
2248
/* called from signal handler: invalidate the code and unprotect the
2249
   page. Return TRUE if the fault was successfully handled. */
2250
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2251
{
2252
    unsigned int page_index, prot, pindex;
2253
    PageDesc *p, *p1;
2254
    target_ulong host_start, host_end, addr;
2255

    
2256
    /* Technically this isn't safe inside a signal handler.  However we
2257
       know this only ever happens in a synchronous SEGV handler, so in
2258
       practice it seems to be ok.  */
2259
    mmap_lock();
2260

    
2261
    host_start = address & qemu_host_page_mask;
2262
    page_index = host_start >> TARGET_PAGE_BITS;
2263
    p1 = page_find(page_index);
2264
    if (!p1) {
2265
        mmap_unlock();
2266
        return 0;
2267
    }
2268
    host_end = host_start + qemu_host_page_size;
2269
    p = p1;
2270
    prot = 0;
2271
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2272
        prot |= p->flags;
2273
        p++;
2274
    }
2275
    /* if the page was really writable, then we change its
2276
       protection back to writable */
2277
    if (prot & PAGE_WRITE_ORG) {
2278
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2279
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2280
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2281
                     (prot & PAGE_BITS) | PAGE_WRITE);
2282
            p1[pindex].flags |= PAGE_WRITE;
2283
            /* and since the content will be modified, we must invalidate
2284
               the corresponding translated code. */
2285
            tb_invalidate_phys_page(address, pc, puc);
2286
#ifdef DEBUG_TB_CHECK
2287
            tb_invalidate_check(address);
2288
#endif
2289
            mmap_unlock();
2290
            return 1;
2291
        }
2292
    }
2293
    mmap_unlock();
2294
    return 0;
2295
}
2296

    
2297
static inline void tlb_set_dirty(CPUState *env,
2298
                                 unsigned long addr, target_ulong vaddr)
2299
{
2300
}
2301
#endif /* defined(CONFIG_USER_ONLY) */
2302

    
2303
#if !defined(CONFIG_USER_ONLY)
2304

    
2305
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2306
                             ram_addr_t memory, ram_addr_t region_offset);
2307
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2308
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2309
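/* Compute the offsets within the page at 'addr' that the
   [start_addr, start_addr + orig_size) range actually covers
   (start_addr2/end_addr2) and set need_subpage when the range does not
   span the whole page.  Note: 'orig_size' is taken from the enclosing
   scope, not from a macro argument. */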
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2310
                      need_subpage)                                     \
2311
    do {                                                                \
2312
        if (addr > start_addr)                                          \
2313
            start_addr2 = 0;                                            \
2314
        else {                                                          \
2315
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2316
            if (start_addr2 > 0)                                        \
2317
                need_subpage = 1;                                       \
2318
        }                                                               \
2319
                                                                        \
2320
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2321
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2322
        else {                                                          \
2323
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2324
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2325
                need_subpage = 1;                                       \
2326
        }                                                               \
2327
    } while (0)
2328

    
2329
/* register physical memory. 'size' must be a multiple of the target
2330
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2331
   io memory page.  The address used when calling the IO function is
2332
   the offset from the start of the region, plus region_offset.  Both
2333
   start_addr and region_offset are rounded down to a page boundary
2334
   before calculating this offset.  This should not be a problem unless
2335
   the low bits of start_addr and region_offset differ.  */
2336
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2337
                                         ram_addr_t size,
2338
                                         ram_addr_t phys_offset,
2339
                                         ram_addr_t region_offset)
2340
{
2341
    target_phys_addr_t addr, end_addr;
2342
    PhysPageDesc *p;
2343
    CPUState *env;
2344
    ram_addr_t orig_size = size;
2345
    void *subpage;
2346

    
2347
#ifdef CONFIG_KQEMU
2348
    /* XXX: should not depend on cpu context */
2349
    env = first_cpu;
2350
    if (env->kqemu_enabled) {
2351
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2352
    }
2353
#endif
2354
    if (kvm_enabled())
2355
        kvm_set_phys_mem(start_addr, size, phys_offset);
2356

    
2357
    if (phys_offset == IO_MEM_UNASSIGNED) {
2358
        region_offset = start_addr;
2359
    }
2360
    region_offset &= TARGET_PAGE_MASK;
2361
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2362
    end_addr = start_addr + (target_phys_addr_t)size;
2363
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2364
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2365
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2366
            ram_addr_t orig_memory = p->phys_offset;
2367
            target_phys_addr_t start_addr2, end_addr2;
2368
            int need_subpage = 0;
2369

    
2370
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2371
                          need_subpage);
2372
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2373
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2374
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2375
                                           &p->phys_offset, orig_memory,
2376
                                           p->region_offset);
2377
                } else {
2378
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2379
                                            >> IO_MEM_SHIFT];
2380
                }
2381
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2382
                                 region_offset);
2383
                p->region_offset = 0;
2384
            } else {
2385
                p->phys_offset = phys_offset;
2386
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2387
                    (phys_offset & IO_MEM_ROMD))
2388
                    phys_offset += TARGET_PAGE_SIZE;
2389
            }
2390
        } else {
2391
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2392
            p->phys_offset = phys_offset;
2393
            p->region_offset = region_offset;
2394
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2395
                (phys_offset & IO_MEM_ROMD)) {
2396
                phys_offset += TARGET_PAGE_SIZE;
2397
            } else {
2398
                target_phys_addr_t start_addr2, end_addr2;
2399
                int need_subpage = 0;
2400

    
2401
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2402
                              end_addr2, need_subpage);
2403

    
2404
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2405
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2406
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2407
                                           addr & TARGET_PAGE_MASK);
2408
                    subpage_register(subpage, start_addr2, end_addr2,
2409
                                     phys_offset, region_offset);
2410
                    p->region_offset = 0;
2411
                }
2412
            }
2413
        }
2414
        region_offset += TARGET_PAGE_SIZE;
2415
    }
2416

    
2417
    /* since each CPU stores ram addresses in its TLB cache, we must
2418
       reset the modified entries */
2419
    /* XXX: slow ! */
2420
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2421
        tlb_flush(env, 1);
2422
    }
2423
}
2424

    
2425
/* XXX: temporary until new memory mapping API */
2426
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2427
{
2428
    PhysPageDesc *p;
2429

    
2430
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2431
    if (!p)
2432
        return IO_MEM_UNASSIGNED;
2433
    return p->phys_offset;
2434
}
2435

    
2436
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2437
{
2438
    if (kvm_enabled())
2439
        kvm_coalesce_mmio_region(addr, size);
2440
}
2441

    
2442
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2443
{
2444
    if (kvm_enabled())
2445
        kvm_uncoalesce_mmio_region(addr, size);
2446
}
2447

    
2448
#ifdef CONFIG_KQEMU
2449
/* XXX: better than nothing */
2450
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2451
{
2452
    ram_addr_t addr;
2453
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2454
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2455
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2456
        abort();
2457
    }
2458
    addr = last_ram_offset;
2459
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2460
    return addr;
2461
}
2462
#endif
2463

    
2464
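/* Allocate a new block of guest RAM, extend the dirty bitmap to cover it
   and return the ram_addr_t offset of the new block. */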
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2465
{
2466
    RAMBlock *new_block;
2467

    
2468
#ifdef CONFIG_KQEMU
2469
    if (kqemu_phys_ram_base) {
2470
        return kqemu_ram_alloc(size);
2471
    }
2472
#endif
2473

    
2474
    size = TARGET_PAGE_ALIGN(size);
2475
    new_block = qemu_malloc(sizeof(*new_block));
2476

    
2477
    new_block->host = qemu_vmalloc(size);
2478
    new_block->offset = last_ram_offset;
2479
    new_block->length = size;
2480

    
2481
    new_block->next = ram_blocks;
2482
    ram_blocks = new_block;
2483

    
2484
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2485
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2486
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2487
           0xff, size >> TARGET_PAGE_BITS);
2488

    
2489
    last_ram_offset += size;
2490

    
2491
    if (kvm_enabled())
2492
        kvm_setup_guest_memory(new_block->host, size);
2493

    
2494
    return new_block->offset;
2495
}
2496

    
2497
void qemu_ram_free(ram_addr_t addr)
2498
{
2499
    /* TODO: implement this.  */
2500
}
2501

    
2502
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2503
   With the exception of the softmmu code in this file, this should
2504
   only be used for local memory (e.g. video ram) that the device owns,
2505
   and knows it isn't going to access beyond the end of the block.
2506

2507
   It should not be used for general purpose DMA.
2508
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2509
 */
2510
void *qemu_get_ram_ptr(ram_addr_t addr)
2511
{
2512
    RAMBlock *prev;
2513
    RAMBlock **prevp;
2514
    RAMBlock *block;
2515

    
2516
#ifdef CONFIG_KQEMU
2517
    if (kqemu_phys_ram_base) {
2518
        return kqemu_phys_ram_base + addr;
2519
    }
2520
#endif
2521

    
2522
    prev = NULL;
2523
    prevp = &ram_blocks;
2524
    block = ram_blocks;
2525
    while (block && (block->offset > addr
2526
                     || block->offset + block->length <= addr)) {
2527
        if (prev)
2528
          prevp = &prev->next;
2529
        prev = block;
2530
        block = block->next;
2531
    }
2532
    if (!block) {
2533
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2534
        abort();
2535
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
2538
        prev->next = block->next;
2539
        block->next = *prevp;
2540
        *prevp = block;
2541
    }
2542
    return block->host + (addr - block->offset);
2543
}
2544

    
2545
/* Some of the softmmu routines need to translate from a host pointer
2546
   (typically a TLB entry) back to a ram offset.  */
2547
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2548
{
2549
    RAMBlock *prev;
2550
    RAMBlock **prevp;
2551
    RAMBlock *block;
2552
    uint8_t *host = ptr;
2553

    
2554
#ifdef CONFIG_KQEMU
2555
    if (kqemu_phys_ram_base) {
2556
        return host - kqemu_phys_ram_base;
2557
    }
2558
#endif
2559

    
2560
    prev = NULL;
2561
    prevp = &ram_blocks;
2562
    block = ram_blocks;
2563
    while (block && (block->host > host
2564
                     || block->host + block->length <= host)) {
2565
        if (prev)
2566
          prevp = &prev->next;
2567
        prev = block;
2568
        block = block->next;
2569
    }
2570
    if (!block) {
2571
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2572
        abort();
2573
    }
2574
    return block->offset + (host - block->host);
2575
}
2576

    
2577
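/* Handlers for accesses to unassigned physical addresses: optionally log
   the access, raise a fault on targets that require it (SPARC), and
   return zero for reads. */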
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2578
{
2579
#ifdef DEBUG_UNASSIGNED
2580
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2581
#endif
2582
#if defined(TARGET_SPARC)
2583
    do_unassigned_access(addr, 0, 0, 0, 1);
2584
#endif
2585
    return 0;
2586
}
2587

    
2588
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2589
{
2590
#ifdef DEBUG_UNASSIGNED
2591
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2592
#endif
2593
#if defined(TARGET_SPARC)
2594
    do_unassigned_access(addr, 0, 0, 0, 2);
2595
#endif
2596
    return 0;
2597
}
2598

    
2599
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2600
{
2601
#ifdef DEBUG_UNASSIGNED
2602
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2603
#endif
2604
#if defined(TARGET_SPARC)
2605
    do_unassigned_access(addr, 0, 0, 0, 4);
2606
#endif
2607
    return 0;
2608
}
2609

    
2610
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2611
{
2612
#ifdef DEBUG_UNASSIGNED
2613
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2614
#endif
2615
#if defined(TARGET_SPARC)
2616
    do_unassigned_access(addr, 1, 0, 0, 1);
2617
#endif
2618
}
2619

    
2620
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2621
{
2622
#ifdef DEBUG_UNASSIGNED
2623
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2624
#endif
2625
#if defined(TARGET_SPARC)
2626
    do_unassigned_access(addr, 1, 0, 0, 2);
2627
#endif
2628
}
2629

    
2630
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2631
{
2632
#ifdef DEBUG_UNASSIGNED
2633
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2634
#endif
2635
#if defined(TARGET_SPARC)
2636
    do_unassigned_access(addr, 1, 0, 0, 4);
2637
#endif
2638
}
2639

    
2640
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2641
    unassigned_mem_readb,
2642
    unassigned_mem_readw,
2643
    unassigned_mem_readl,
2644
};
2645

    
2646
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2647
    unassigned_mem_writeb,
2648
    unassigned_mem_writew,
2649
    unassigned_mem_writel,
2650
};
2651

    
2652
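/* Write handlers for RAM pages whose TLB entries carry TLB_NOTDIRTY:
   invalidate any translated code on the page, perform the store, update
   the dirty bits and drop the trap once the page is fully dirty. */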
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2653
                                uint32_t val)
2654
{
2655
    int dirty_flags;
2656
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2657
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2658
#if !defined(CONFIG_USER_ONLY)
2659
        tb_invalidate_phys_page_fast(ram_addr, 1);
2660
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2661
#endif
2662
    }
2663
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2664
#ifdef CONFIG_KQEMU
2665
    if (cpu_single_env->kqemu_enabled &&
2666
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2667
        kqemu_modify_page(cpu_single_env, ram_addr);
2668
#endif
2669
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2670
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2671
    /* we remove the notdirty callback only if the code has been
2672
       flushed */
2673
    if (dirty_flags == 0xff)
2674
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2675
}
2676

    
2677
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2678
                                uint32_t val)
2679
{
2680
    int dirty_flags;
2681
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2682
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2683
#if !defined(CONFIG_USER_ONLY)
2684
        tb_invalidate_phys_page_fast(ram_addr, 2);
2685
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2686
#endif
2687
    }
2688
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2689
#ifdef CONFIG_KQEMU
2690
    if (cpu_single_env->kqemu_enabled &&
2691
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2692
        kqemu_modify_page(cpu_single_env, ram_addr);
2693
#endif
2694
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2695
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2696
    /* we remove the notdirty callback only if the code has been
2697
       flushed */
2698
    if (dirty_flags == 0xff)
2699
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2700
}
2701

    
2702
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2703
                                uint32_t val)
2704
{
2705
    int dirty_flags;
2706
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2707
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2708
#if !defined(CONFIG_USER_ONLY)
2709
        tb_invalidate_phys_page_fast(ram_addr, 4);
2710
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2711
#endif
2712
    }
2713
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2714
#ifdef CONFIG_KQEMU
2715
    if (cpu_single_env->kqemu_enabled &&
2716
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2717
        kqemu_modify_page(cpu_single_env, ram_addr);
2718
#endif
2719
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2720
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2721
    /* we remove the notdirty callback only if the code has been
2722
       flushed */
2723
    if (dirty_flags == 0xff)
2724
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2725
}
2726

    
2727
static CPUReadMemoryFunc *error_mem_read[3] = {
2728
    NULL, /* never used */
2729
    NULL, /* never used */
2730
    NULL, /* never used */
2731
};
2732

    
2733
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2734
    notdirty_mem_writeb,
2735
    notdirty_mem_writew,
2736
    notdirty_mem_writel,
2737
};
2738

    
2739
/* Generate a debug exception if a watchpoint has been hit.  */
2740
static void check_watchpoint(int offset, int len_mask, int flags)
2741
{
2742
    CPUState *env = cpu_single_env;
2743
    target_ulong pc, cs_base;
2744
    TranslationBlock *tb;
2745
    target_ulong vaddr;
2746
    CPUWatchpoint *wp;
2747
    int cpu_flags;
2748

    
2749
    if (env->watchpoint_hit) {
2750
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2753
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2754
        return;
2755
    }
2756
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2757
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2758
        if ((vaddr == (wp->vaddr & len_mask) ||
2759
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2760
            wp->flags |= BP_WATCHPOINT_HIT;
2761
            if (!env->watchpoint_hit) {
2762
                env->watchpoint_hit = wp;
2763
                tb = tb_find_pc(env->mem_io_pc);
2764
                if (!tb) {
2765
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2766
                              "pc=%p", (void *)env->mem_io_pc);
2767
                }
2768
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2769
                tb_phys_invalidate(tb, -1);
2770
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2771
                    env->exception_index = EXCP_DEBUG;
2772
                } else {
2773
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2774
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2775
                }
2776
                cpu_resume_from_signal(env, NULL);
2777
            }
2778
        } else {
2779
            wp->flags &= ~BP_WATCHPOINT_HIT;
2780
        }
2781
    }
2782
}
2783

    
2784
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2785
   so these check for a hit then pass through to the normal out-of-line
2786
   phys routines.  */
2787
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2788
{
2789
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2790
    return ldub_phys(addr);
2791
}
2792

    
2793
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2794
{
2795
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2796
    return lduw_phys(addr);
2797
}
2798

    
2799
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2800
{
2801
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2802
    return ldl_phys(addr);
2803
}
2804

    
2805
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2806
                             uint32_t val)
2807
{
2808
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2809
    stb_phys(addr, val);
2810
}
2811

    
2812
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2813
                             uint32_t val)
2814
{
2815
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2816
    stw_phys(addr, val);
2817
}
2818

    
2819
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2820
                             uint32_t val)
2821
{
2822
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2823
    stl_phys(addr, val);
2824
}
2825

    
2826
static CPUReadMemoryFunc *watch_mem_read[3] = {
2827
    watch_mem_readb,
2828
    watch_mem_readw,
2829
    watch_mem_readl,
2830
};
2831

    
2832
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2833
    watch_mem_writeb,
2834
    watch_mem_writew,
2835
    watch_mem_writel,
2836
};
2837

    
2838
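/* Dispatch a sub-page read of 2^len bytes to the handler registered for
   this offset within the page. */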
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2839
                                 unsigned int len)
2840
{
2841
    uint32_t ret;
2842
    unsigned int idx;
2843

    
2844
    idx = SUBPAGE_IDX(addr);
2845
#if defined(DEBUG_SUBPAGE)
2846
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2847
           mmio, len, addr, idx);
2848
#endif
2849
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2850
                                       addr + mmio->region_offset[idx][0][len]);
2851

    
2852
    return ret;
2853
}
2854

    
2855
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2856
                              uint32_t value, unsigned int len)
2857
{
2858
    unsigned int idx;
2859

    
2860
    idx = SUBPAGE_IDX(addr);
2861
#if defined(DEBUG_SUBPAGE)
2862
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2863
           mmio, len, addr, idx, value);
2864
#endif
2865
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2866
                                  addr + mmio->region_offset[idx][1][len],
2867
                                  value);
2868
}
2869

    
2870
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2871
{
2872
#if defined(DEBUG_SUBPAGE)
2873
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2874
#endif
2875

    
2876
    return subpage_readlen(opaque, addr, 0);
2877
}
2878

    
2879
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2880
                            uint32_t value)
2881
{
2882
#if defined(DEBUG_SUBPAGE)
2883
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2884
#endif
2885
    subpage_writelen(opaque, addr, value, 0);
2886
}
2887

    
2888
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2889
{
2890
#if defined(DEBUG_SUBPAGE)
2891
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2892
#endif
2893

    
2894
    return subpage_readlen(opaque, addr, 1);
2895
}
2896

    
2897
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2898
                            uint32_t value)
2899
{
2900
#if defined(DEBUG_SUBPAGE)
2901
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2902
#endif
2903
    subpage_writelen(opaque, addr, value, 1);
2904
}
2905

    
2906
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2907
{
2908
#if defined(DEBUG_SUBPAGE)
2909
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2910
#endif
2911

    
2912
    return subpage_readlen(opaque, addr, 2);
2913
}
2914

    
2915
static void subpage_writel (void *opaque,
2916
                         target_phys_addr_t addr, uint32_t value)
2917
{
2918
#if defined(DEBUG_SUBPAGE)
2919
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2920
#endif
2921
    subpage_writelen(opaque, addr, value, 2);
2922
}
2923

    
2924
static CPUReadMemoryFunc *subpage_read[] = {
2925
    &subpage_readb,
2926
    &subpage_readw,
2927
    &subpage_readl,
2928
};
2929

    
2930
static CPUWriteMemoryFunc *subpage_write[] = {
2931
    &subpage_writeb,
2932
    &subpage_writew,
2933
    &subpage_writel,
2934
};
2935

    
2936
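/* Fill the sub-page dispatch tables for offsets [start, end] with the
   read/write handlers of the I/O memory slot 'memory'. */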
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2937
                             ram_addr_t memory, ram_addr_t region_offset)
2938
{
2939
    int idx, eidx;
2940
    unsigned int i;
2941

    
2942
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2943
        return -1;
2944
    idx = SUBPAGE_IDX(start);
2945
    eidx = SUBPAGE_IDX(end);
2946
#if defined(DEBUG_SUBPAGE)
2947
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2948
           mmio, start, end, idx, eidx, memory);
2949
#endif
2950
    memory >>= IO_MEM_SHIFT;
2951
    for (; idx <= eidx; idx++) {
2952
        for (i = 0; i < 4; i++) {
2953
            if (io_mem_read[memory][i]) {
2954
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2955
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2956
                mmio->region_offset[idx][0][i] = region_offset;
2957
            }
2958
            if (io_mem_write[memory][i]) {
2959
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2960
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2961
                mmio->region_offset[idx][1][i] = region_offset;
2962
            }
2963
        }
2964
    }
2965

    
2966
    return 0;
2967
}
2968

    
2969
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2970
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2971
{
2972
    subpage_t *mmio;
2973
    int subpage_memory;
2974

    
2975
    mmio = qemu_mallocz(sizeof(subpage_t));
2976

    
2977
    mmio->base = base;
2978
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2979
#if defined(DEBUG_SUBPAGE)
2980
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2981
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2982
#endif
2983
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2984
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2985
                         region_offset);
2986

    
2987
    return mmio;
2988
}
2989

    
2990
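/* Find and claim the first free slot in the io_mem_* tables, or return -1
   if all IO_MEM_NB_ENTRIES slots are in use. */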
static int get_free_io_mem_idx(void)
2991
{
2992
    int i;
2993

    
2994
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2995
        if (!io_mem_used[i]) {
2996
            io_mem_used[i] = 1;
2997
            return i;
2998
        }
2999

    
3000
    return -1;
3001
}
3002

    
3003
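/* Register the fixed I/O memory slots (ROM, unassigned, not-dirty) and
   the watchpoint handler. */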
static void io_mem_init(void)
3004
{
3005
    int i;
3006

    
3007
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3008
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3009
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3010
    for (i=0; i<5; i++)
3011
        io_mem_used[i] = 1;
3012

    
3013
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3014
                                          watch_mem_write, NULL);
3015
#ifdef CONFIG_KQEMU
3016
    if (kqemu_phys_ram_base) {
3017
        /* alloc dirty bits array */
3018
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3019
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3020
    }
3021
#endif
3022
}
3023

    
3024
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
3031
int cpu_register_io_memory(int io_index,
3032
                           CPUReadMemoryFunc **mem_read,
3033
                           CPUWriteMemoryFunc **mem_write,
3034
                           void *opaque)
3035
{
3036
    int i, subwidth = 0;
3037

    
3038
    if (io_index <= 0) {
3039
        io_index = get_free_io_mem_idx();
3040
        if (io_index == -1)
3041
            return io_index;
3042
    } else {
3043
        if (io_index >= IO_MEM_NB_ENTRIES)
3044
            return -1;
3045
    }
3046

    
3047
    for(i = 0;i < 3; i++) {
3048
        if (!mem_read[i] || !mem_write[i])
3049
            subwidth = IO_MEM_SUBWIDTH;
3050
        io_mem_read[io_index][i] = mem_read[i];
3051
        io_mem_write[io_index][i] = mem_write[i];
3052
    }
3053
    io_mem_opaque[io_index] = opaque;
3054
    return (io_index << IO_MEM_SHIFT) | subwidth;
3055
}
3056
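/* Illustrative sketch (not part of the build): a hypothetical device with
   32-bit-only registers would typically register its handlers along these
   lines, where foo_readl/foo_writel, the opaque pointer 's' and 'base' are
   assumptions, not names from this file:

       static CPUReadMemoryFunc *foo_read[3] = { NULL, NULL, foo_readl };
       static CPUWriteMemoryFunc *foo_write[3] = { NULL, NULL, foo_writel };

       int io = cpu_register_io_memory(0, foo_read, foo_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   Because some widths are NULL here, the returned value carries
   IO_MEM_SUBWIDTH and such pages go through the subpage machinery above. */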

    
3057
void cpu_unregister_io_memory(int io_table_address)
3058
{
3059
    int i;
3060
    int io_index = io_table_address >> IO_MEM_SHIFT;
3061

    
3062
    for (i=0;i < 3; i++) {
3063
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3064
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3065
    }
3066
    io_mem_opaque[io_index] = NULL;
3067
    io_mem_used[io_index] = 0;
3068
}
3069

    
3070
#endif /* !defined(CONFIG_USER_ONLY) */
3071

    
3072
/* physical memory access (slow version, mainly for debug) */
3073
#if defined(CONFIG_USER_ONLY)
3074
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3075
                            int len, int is_write)
3076
{
3077
    int l, flags;
3078
    target_ulong page;
3079
    void * p;
3080

    
3081
    while (len > 0) {
3082
        page = addr & TARGET_PAGE_MASK;
3083
        l = (page + TARGET_PAGE_SIZE) - addr;
3084
        if (l > len)
3085
            l = len;
3086
        flags = page_get_flags(page);
3087
        if (!(flags & PAGE_VALID))
3088
            return;
3089
        if (is_write) {
3090
            if (!(flags & PAGE_WRITE))
3091
                return;
3092
            /* XXX: this code should not depend on lock_user */
3093
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3094
                /* FIXME - should this return an error rather than just fail? */
3095
                return;
3096
            memcpy(p, buf, l);
3097
            unlock_user(p, addr, l);
3098
        } else {
3099
            if (!(flags & PAGE_READ))
3100
                return;
3101
            /* XXX: this code should not depend on lock_user */
3102
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3103
                /* FIXME - should this return an error rather than just fail? */
3104
                return;
3105
            memcpy(buf, p, l);
3106
            unlock_user(p, addr, 0);
3107
        }
3108
        len -= l;
3109
        buf += l;
3110
        addr += l;
3111
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

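/* Illustrative sketch (not part of the original file): device emulation
   normally goes through this helper, or the cpu_physical_memory_read()/
   cpu_physical_memory_write() wrappers, rather than touching guest RAM
   directly, so that I/O regions, ROM and the dirty/SMC tracking above are
   all honoured.  For example, a hypothetical device model copying a guest
   buffer at physical address dma_addr might do:

       uint8_t tmp[64];
       cpu_physical_memory_read(dma_addr, tmp, sizeof(tmp));
       ... process tmp ...
       cpu_physical_memory_write(dma_addr, tmp, sizeof(tmp));

   dma_addr and tmp are hypothetical caller variables. */
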
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

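/* Illustrative sketch (not part of the original file): unlike
   cpu_physical_memory_rw(), the helper above also writes into regions
   registered as ROM, which is what firmware loading needs.  A
   hypothetical board init might do:

       cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);

   where bios_base/bios_data/bios_size describe an image the caller has
   already read from disk. */
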
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

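/* Illustrative sketch (not part of the original file): a caller whose
   mapping attempt fails (for instance because the single bounce buffer
   is already in use) can register a callback to be told when a retry is
   worth attempting:

       static void retry_dma(void *opaque)
       {
           start_dma(opaque);
       }

       cpu_register_map_client(s, retry_dma);

   start_dma and s are hypothetical.  Note that cpu_notify_map_clients()
   above removes each client after invoking it, so the callback must not
   unregister its own handle; cpu_unregister_map_client() is for
   cancelling a registration that is still pending. */
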
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

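/* Illustrative sketch (not part of the original file): the usual pattern
   is to try the zero-copy mapping first and fall back to the slow
   cpu_physical_memory_rw() path (or register a map client) when the
   mapping is unavailable:

       target_phys_addr_t plen = xfer_len;
       uint8_t *host = cpu_physical_memory_map(xfer_addr, &plen, is_write);
       if (host) {
           ... read or write at most plen bytes through host ...
           cpu_physical_memory_unmap(host, plen, is_write, plen);
       } else {
           cpu_physical_memory_rw(xfer_addr, buf, xfer_len, is_write);
       }

   xfer_addr/xfer_len/buf/is_write are hypothetical caller variables.
   Because *plen may come back smaller than requested, real callers loop
   or build a scatter-gather list. */
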
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

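/* Illustrative sketch (not part of the original file): the *_notdirty
   stores above are meant for target code that rewrites guest page table
   entries, e.g. a software page-table walker setting an accessed or
   dirty bit:

       pte = ldl_phys(pte_addr);
       stl_phys_notdirty(pte_addr, pte | accessed_bit);

   pte_addr and accessed_bit stand in for a target-specific PTE address
   and flag mask.  Using the notdirty variant keeps the PTE update from
   itself setting the page's dirty flags or invalidating translated code
   on that page. */
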
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

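/* Illustrative sketch (not part of the original file): this is the entry
   point used by debug facilities such as the gdb stub, which work on
   guest virtual addresses.  Reading a guest word for a debugger command
   might look like:

       uint8_t word[4];
       if (cpu_memory_rw_debug(env, vaddr, word, sizeof(word), 0) < 0) {
           ... address not mapped, report an error ...
       }

   where env is the CPU whose page tables should be consulted and vaddr
   is a hypothetical guest virtual address. */
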
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif