root / exec.c @ 1eed09cb
1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37
#include "qemu-common.h"
38
#include "tcg.h"
39
#include "hw/hw.h"
40
#include "osdep.h"
41
#include "kvm.h"
42
#if defined(CONFIG_USER_ONLY)
43
#include <qemu.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81
#endif
82

    
83
static TranslationBlock *tbs;
84
int code_gen_max_blocks;
85
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86
static int nb_tbs;
87
/* any access to the tbs or the page table must use this lock */
88
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89

    
90
#if defined(__arm__) || defined(__sparc_v9__)
91
/* The prologue must be reachable with a direct jump. ARM and Sparc64
92
 have limited branch ranges (possibly also PPC) so place it in a
93
 section close to the code segment. */
94
#define code_gen_section                                \
95
    __attribute__((__section__(".gen_code")))           \
96
    __attribute__((aligned (32)))
97
#else
98
#define code_gen_section                                \
99
    __attribute__((aligned (32)))
100
#endif
101

    
102
uint8_t code_gen_prologue[1024] code_gen_section;
103
static uint8_t *code_gen_buffer;
104
static unsigned long code_gen_buffer_size;
105
/* threshold to flush the translated code buffer */
106
static unsigned long code_gen_buffer_max_size;
107
uint8_t *code_gen_ptr;
108

    
109
#if !defined(CONFIG_USER_ONLY)
110
int phys_ram_fd;
111
uint8_t *phys_ram_dirty;
112
static int in_migration;
113

    
114
typedef struct RAMBlock {
115
    uint8_t *host;
116
    ram_addr_t offset;
117
    ram_addr_t length;
118
    struct RAMBlock *next;
119
} RAMBlock;
120

    
121
static RAMBlock *ram_blocks;
122
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123
   then we can no longer assume contiguous ram offsets, and external uses
124
   of this variable will break.  */
125
ram_addr_t last_ram_offset;
126
#endif
127

    
128
CPUState *first_cpu;
129
/* current CPU in the current thread. It is only valid inside
130
   cpu_exec() */
131
CPUState *cpu_single_env;
132
/* 0 = Do not count executed instructions.
133
   1 = Precise instruction counting.
134
   2 = Adaptive rate instruction counting.  */
135
int use_icount = 0;
136
/* Current instruction counter.  While executing translated code this may
137
   include some instructions that have not yet been executed.  */
138
int64_t qemu_icount;
139

    
140
typedef struct PageDesc {
141
    /* list of TBs intersecting this ram page */
142
    TranslationBlock *first_tb;
143
    /* in order to optimize self-modifying code, we count the number
145
       of write lookups to a given page before switching to a bitmap */
145
    unsigned int code_write_count;
146
    uint8_t *code_bitmap;
147
#if defined(CONFIG_USER_ONLY)
148
    unsigned long flags;
149
#endif
150
} PageDesc;
151

    
152
typedef struct PhysPageDesc {
153
    /* offset in host memory of the page + io_index in the low bits */
154
    ram_addr_t phys_offset;
155
    ram_addr_t region_offset;
156
} PhysPageDesc;
157

    
158
#define L2_BITS 10
159
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160
/* XXX: this is a temporary hack for alpha target.
161
 *      In the future, this is to be replaced by a multi-level table
162
 *      to actually be able to handle the complete 64-bit address space.
163
 */
164
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
165
#else
166
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
167
#endif
168

    
169
#define L1_SIZE (1 << L1_BITS)
170
#define L2_SIZE (1 << L2_BITS)
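/* Note: with the split above, a page index decomposes as
   index = (l1 << L2_BITS) | l2; l1_map[l1] points to an array of L2_SIZE
   PageDesc entries and l2 selects one of them, so unused regions of the
   address space only cost a NULL L1 pointer (4 MB granularity for a
   typical 4 KB target page). */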
171

    
172
unsigned long qemu_real_host_page_size;
173
unsigned long qemu_host_page_bits;
174
unsigned long qemu_host_page_size;
175
unsigned long qemu_host_page_mask;
176

    
177
/* XXX: for system emulation, it could just be an array */
178
static PageDesc *l1_map[L1_SIZE];
179
static PhysPageDesc **l1_phys_map;
180

    
181
#if !defined(CONFIG_USER_ONLY)
182
static void io_mem_init(void);
183

    
184
/* io memory support */
185
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188
static char io_mem_used[IO_MEM_NB_ENTRIES];
189
static int io_mem_watch;
190
#endif
191

    
192
/* log support */
193
static const char *logfilename = "/tmp/qemu.log";
194
FILE *logfile;
195
int loglevel;
196
static int log_append = 0;
197

    
198
/* statistics */
199
static int tlb_flush_count;
200
static int tb_flush_count;
201
static int tb_phys_invalidate_count;
202

    
203
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204
typedef struct subpage_t {
205
    target_phys_addr_t base;
206
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208
    void *opaque[TARGET_PAGE_SIZE][2][4];
209
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
210
} subpage_t;
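/* Note: a subpage splits a single target page among several I/O handlers.
   The tables above are indexed by the byte offset inside the page (see
   SUBPAGE_IDX) and by an access-size slot, mirroring io_mem_read/io_mem_write;
   opaque and region_offset additionally distinguish read (0) from write (1). */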
211

    
212
#ifdef _WIN32
213
static void map_exec(void *addr, long size)
214
{
215
    DWORD old_protect;
216
    VirtualProtect(addr, size,
217
                   PAGE_EXECUTE_READWRITE, &old_protect);
218
    
219
}
220
#else
221
static void map_exec(void *addr, long size)
222
{
223
    unsigned long start, end, page_size;
224
    
225
    page_size = getpagesize();
226
    start = (unsigned long)addr;
227
    start &= ~(page_size - 1);
228
    
229
    end = (unsigned long)addr + size;
230
    end += page_size - 1;
231
    end &= ~(page_size - 1);
232
    
233
    mprotect((void *)start, end - start,
234
             PROT_READ | PROT_WRITE | PROT_EXEC);
235
}
236
#endif
237

    
238
static void page_init(void)
239
{
240
    /* NOTE: we can always suppose that qemu_host_page_size >=
241
       TARGET_PAGE_SIZE */
242
#ifdef _WIN32
243
    {
244
        SYSTEM_INFO system_info;
245

    
246
        GetSystemInfo(&system_info);
247
        qemu_real_host_page_size = system_info.dwPageSize;
248
    }
249
#else
250
    qemu_real_host_page_size = getpagesize();
251
#endif
252
    if (qemu_host_page_size == 0)
253
        qemu_host_page_size = qemu_real_host_page_size;
254
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
255
        qemu_host_page_size = TARGET_PAGE_SIZE;
256
    qemu_host_page_bits = 0;
257
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258
        qemu_host_page_bits++;
259
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
260
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262

    
263
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264
    {
265
        long long startaddr, endaddr;
266
        FILE *f;
267
        int n;
268

    
269
        mmap_lock();
270
        last_brk = (unsigned long)sbrk(0);
271
        f = fopen("/proc/self/maps", "r");
272
        if (f) {
273
            do {
274
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275
                if (n == 2) {
276
                    startaddr = MIN(startaddr,
277
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278
                    endaddr = MIN(endaddr,
279
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
281
                                   TARGET_PAGE_ALIGN(endaddr),
282
                                   PAGE_RESERVED); 
283
                }
284
            } while (!feof(f));
285
            fclose(f);
286
        }
287
        mmap_unlock();
288
    }
289
#endif
290
}
291

    
292
static inline PageDesc **page_l1_map(target_ulong index)
293
{
294
#if TARGET_LONG_BITS > 32
295
    /* Host memory outside guest VM.  For 32-bit targets we have already
296
       excluded high addresses.  */
297
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298
        return NULL;
299
#endif
300
    return &l1_map[index >> L2_BITS];
301
}
302

    
303
static inline PageDesc *page_find_alloc(target_ulong index)
304
{
305
    PageDesc **lp, *p;
306
    lp = page_l1_map(index);
307
    if (!lp)
308
        return NULL;
309

    
310
    p = *lp;
311
    if (!p) {
312
        /* allocate if not found */
313
#if defined(CONFIG_USER_ONLY)
314
        size_t len = sizeof(PageDesc) * L2_SIZE;
315
        /* Don't use qemu_malloc because it may recurse.  */
316
        p = mmap(0, len, PROT_READ | PROT_WRITE,
317
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318
        *lp = p;
319
        if (h2g_valid(p)) {
320
            unsigned long addr = h2g(p);
321
            page_set_flags(addr & TARGET_PAGE_MASK,
322
                           TARGET_PAGE_ALIGN(addr + len),
323
                           PAGE_RESERVED); 
324
        }
325
#else
326
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327
        *lp = p;
328
#endif
329
    }
330
    return p + (index & (L2_SIZE - 1));
331
}
332

    
333
static inline PageDesc *page_find(target_ulong index)
334
{
335
    PageDesc **lp, *p;
336
    lp = page_l1_map(index);
337
    if (!lp)
338
        return NULL;
339

    
340
    p = *lp;
341
    if (!p)
342
        return 0;
343
    return p + (index & (L2_SIZE - 1));
344
}
345

    
346
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347
{
348
    void **lp, **p;
349
    PhysPageDesc *pd;
350

    
351
    p = (void **)l1_phys_map;
352
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
353

    
354
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356
#endif
357
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358
    p = *lp;
359
    if (!p) {
360
        /* allocate if not found */
361
        if (!alloc)
362
            return NULL;
363
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364
        memset(p, 0, sizeof(void *) * L1_SIZE);
365
        *lp = p;
366
    }
367
#endif
368
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369
    pd = *lp;
370
    if (!pd) {
371
        int i;
372
        /* allocate if not found */
373
        if (!alloc)
374
            return NULL;
375
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376
        *lp = pd;
377
        for (i = 0; i < L2_SIZE; i++) {
378
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
379
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380
        }
381
    }
382
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383
}
384

    
385
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386
{
387
    return phys_page_find_alloc(index, 0);
388
}
389

    
390
#if !defined(CONFIG_USER_ONLY)
391
static void tlb_protect_code(ram_addr_t ram_addr);
392
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393
                                    target_ulong vaddr);
394
#define mmap_lock() do { } while(0)
395
#define mmap_unlock() do { } while(0)
396
#endif
397

    
398
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
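/* Note: 32 MB is only the default; in code_gen_alloc() below the buffer is
   either the static array (user mode) or sized from ram_size and then clamped
   to host-specific limits (e.g. the MAP_32BIT window on x86_64 Linux hosts,
   or 16 MB on ARM hosts). */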
399

    
400
#if defined(CONFIG_USER_ONLY)
401
/* Currently it is not recommended to allocate big chunks of data in
402
   user mode. This will change when a dedicated libc is used. */
403
#define USE_STATIC_CODE_GEN_BUFFER
404
#endif
405

    
406
#ifdef USE_STATIC_CODE_GEN_BUFFER
407
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408
#endif
409

    
410
static void code_gen_alloc(unsigned long tb_size)
411
{
412
#ifdef USE_STATIC_CODE_GEN_BUFFER
413
    code_gen_buffer = static_code_gen_buffer;
414
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415
    map_exec(code_gen_buffer, code_gen_buffer_size);
416
#else
417
    code_gen_buffer_size = tb_size;
418
    if (code_gen_buffer_size == 0) {
419
#if defined(CONFIG_USER_ONLY)
420
        /* in user mode, phys_ram_size is not meaningful */
421
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422
#else
423
        /* XXX: needs adjustments */
424
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
425
#endif
426
    }
427
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429
    /* The code gen buffer location may have constraints depending on
430
       the host cpu and OS */
431
#if defined(__linux__) 
432
    {
433
        int flags;
434
        void *start = NULL;
435

    
436
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
437
#if defined(__x86_64__)
438
        flags |= MAP_32BIT;
439
        /* Cannot map more than that */
440
        if (code_gen_buffer_size > (800 * 1024 * 1024))
441
            code_gen_buffer_size = (800 * 1024 * 1024);
442
#elif defined(__sparc_v9__)
443
        /* Map the buffer below 2G, so we can use direct calls and branches */
444
        flags |= MAP_FIXED;
445
        start = (void *) 0x60000000UL;
446
        if (code_gen_buffer_size > (512 * 1024 * 1024))
447
            code_gen_buffer_size = (512 * 1024 * 1024);
448
#elif defined(__arm__)
449
        /* Map the buffer below 32M, so we can use direct calls and branches */
450
        flags |= MAP_FIXED;
451
        start = (void *) 0x01000000UL;
452
        if (code_gen_buffer_size > 16 * 1024 * 1024)
453
            code_gen_buffer_size = 16 * 1024 * 1024;
454
#endif
455
        code_gen_buffer = mmap(start, code_gen_buffer_size,
456
                               PROT_WRITE | PROT_READ | PROT_EXEC,
457
                               flags, -1, 0);
458
        if (code_gen_buffer == MAP_FAILED) {
459
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
460
            exit(1);
461
        }
462
    }
463
#elif defined(__FreeBSD__) || defined(__DragonFly__)
464
    {
465
        int flags;
466
        void *addr = NULL;
467
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
468
#if defined(__x86_64__)
469
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470
         * 0x40000000 is free */
471
        flags |= MAP_FIXED;
472
        addr = (void *)0x40000000;
473
        /* Cannot map more than that */
474
        if (code_gen_buffer_size > (800 * 1024 * 1024))
475
            code_gen_buffer_size = (800 * 1024 * 1024);
476
#endif
477
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
478
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
479
                               flags, -1, 0);
480
        if (code_gen_buffer == MAP_FAILED) {
481
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482
            exit(1);
483
        }
484
    }
485
#else
486
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
487
    map_exec(code_gen_buffer, code_gen_buffer_size);
488
#endif
489
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
490
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491
    code_gen_buffer_max_size = code_gen_buffer_size - 
492
        code_gen_max_block_size();
493
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
495
}
496

    
497
/* Must be called before using the QEMU cpus. 'tb_size' is the size
498
   (in bytes) allocated to the translation buffer. Zero means default
499
   size. */
500
void cpu_exec_init_all(unsigned long tb_size)
501
{
502
    cpu_gen_init();
503
    code_gen_alloc(tb_size);
504
    code_gen_ptr = code_gen_buffer;
505
    page_init();
506
#if !defined(CONFIG_USER_ONLY)
507
    io_mem_init();
508
#endif
509
}
510

    
511
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512

    
513
#define CPU_COMMON_SAVE_VERSION 1
514

    
515
static void cpu_common_save(QEMUFile *f, void *opaque)
516
{
517
    CPUState *env = opaque;
518

    
519
    cpu_synchronize_state(env, 0);
520

    
521
    qemu_put_be32s(f, &env->halted);
522
    qemu_put_be32s(f, &env->interrupt_request);
523
}
524

    
525
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
526
{
527
    CPUState *env = opaque;
528

    
529
    if (version_id != CPU_COMMON_SAVE_VERSION)
530
        return -EINVAL;
531

    
532
    qemu_get_be32s(f, &env->halted);
533
    qemu_get_be32s(f, &env->interrupt_request);
534
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
535
       version_id is increased. */
536
    env->interrupt_request &= ~0x01;
537
    tlb_flush(env, 1);
538
    cpu_synchronize_state(env, 1);
539

    
540
    return 0;
541
}
542
#endif
543

    
544
void cpu_exec_init(CPUState *env)
545
{
546
    CPUState **penv;
547
    int cpu_index;
548

    
549
#if defined(CONFIG_USER_ONLY)
550
    cpu_list_lock();
551
#endif
552
    env->next_cpu = NULL;
553
    penv = &first_cpu;
554
    cpu_index = 0;
555
    while (*penv != NULL) {
556
        penv = &(*penv)->next_cpu;
557
        cpu_index++;
558
    }
559
    env->cpu_index = cpu_index;
560
    env->numa_node = 0;
561
    TAILQ_INIT(&env->breakpoints);
562
    TAILQ_INIT(&env->watchpoints);
563
    *penv = env;
564
#if defined(CONFIG_USER_ONLY)
565
    cpu_list_unlock();
566
#endif
567
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
568
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
569
                    cpu_common_save, cpu_common_load, env);
570
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
571
                    cpu_save, cpu_load, env);
572
#endif
573
}
574

    
575
static inline void invalidate_page_bitmap(PageDesc *p)
576
{
577
    if (p->code_bitmap) {
578
        qemu_free(p->code_bitmap);
579
        p->code_bitmap = NULL;
580
    }
581
    p->code_write_count = 0;
582
}
583

    
584
/* set to NULL all the 'first_tb' fields in all PageDescs */
585
static void page_flush_tb(void)
586
{
587
    int i, j;
588
    PageDesc *p;
589

    
590
    for(i = 0; i < L1_SIZE; i++) {
591
        p = l1_map[i];
592
        if (p) {
593
            for(j = 0; j < L2_SIZE; j++) {
594
                p->first_tb = NULL;
595
                invalidate_page_bitmap(p);
596
                p++;
597
            }
598
        }
599
    }
600
}
601

    
602
/* flush all the translation blocks */
603
/* XXX: tb_flush is currently not thread safe */
604
void tb_flush(CPUState *env1)
605
{
606
    CPUState *env;
607
#if defined(DEBUG_FLUSH)
608
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
609
           (unsigned long)(code_gen_ptr - code_gen_buffer),
610
           nb_tbs, nb_tbs > 0 ?
611
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
612
#endif
613
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
614
        cpu_abort(env1, "Internal error: code buffer overflow\n");
615

    
616
    nb_tbs = 0;
617

    
618
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
619
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
620
    }
621

    
622
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
623
    page_flush_tb();
624

    
625
    code_gen_ptr = code_gen_buffer;
626
    /* XXX: flush processor icache at this point if cache flush is
627
       expensive */
628
    tb_flush_count++;
629
}
630

    
631
#ifdef DEBUG_TB_CHECK
632

    
633
static void tb_invalidate_check(target_ulong address)
634
{
635
    TranslationBlock *tb;
636
    int i;
637
    address &= TARGET_PAGE_MASK;
638
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
639
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
640
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
641
                  address >= tb->pc + tb->size)) {
642
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
643
                       address, (long)tb->pc, tb->size);
644
            }
645
        }
646
    }
647
}
648

    
649
/* verify that all the pages have correct rights for code */
650
static void tb_page_check(void)
651
{
652
    TranslationBlock *tb;
653
    int i, flags1, flags2;
654

    
655
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
656
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
657
            flags1 = page_get_flags(tb->pc);
658
            flags2 = page_get_flags(tb->pc + tb->size - 1);
659
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
660
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
661
                       (long)tb->pc, tb->size, flags1, flags2);
662
            }
663
        }
664
    }
665
}
666

    
667
static void tb_jmp_check(TranslationBlock *tb)
668
{
669
    TranslationBlock *tb1;
670
    unsigned int n1;
671

    
672
    /* suppress any remaining jumps to this TB */
673
    tb1 = tb->jmp_first;
674
    for(;;) {
675
        n1 = (long)tb1 & 3;
676
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
677
        if (n1 == 2)
678
            break;
679
        tb1 = tb1->jmp_next[n1];
680
    }
681
    /* check end of list */
682
    if (tb1 != tb) {
683
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
684
    }
685
}
686

    
687
#endif
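/* Note on the pointer tagging used below: page_next[] links and the
   jmp_first/jmp_next[] lists store a TB pointer with an index packed into the
   low two bits.  The values 0 and 1 say which of the TB's (at most) two pages
   or jump slots the link belongs to, and the value 2 terminates the circular
   jump list (tb->jmp_first == tb | 2 when no other TB is chained to it). */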
688

    
689
/* invalidate one TB */
690
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
691
                             int next_offset)
692
{
693
    TranslationBlock *tb1;
694
    for(;;) {
695
        tb1 = *ptb;
696
        if (tb1 == tb) {
697
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
698
            break;
699
        }
700
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
701
    }
702
}
703

    
704
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
705
{
706
    TranslationBlock *tb1;
707
    unsigned int n1;
708

    
709
    for(;;) {
710
        tb1 = *ptb;
711
        n1 = (long)tb1 & 3;
712
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
713
        if (tb1 == tb) {
714
            *ptb = tb1->page_next[n1];
715
            break;
716
        }
717
        ptb = &tb1->page_next[n1];
718
    }
719
}
720

    
721
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
722
{
723
    TranslationBlock *tb1, **ptb;
724
    unsigned int n1;
725

    
726
    ptb = &tb->jmp_next[n];
727
    tb1 = *ptb;
728
    if (tb1) {
729
        /* find tb(n) in circular list */
730
        for(;;) {
731
            tb1 = *ptb;
732
            n1 = (long)tb1 & 3;
733
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
734
            if (n1 == n && tb1 == tb)
735
                break;
736
            if (n1 == 2) {
737
                ptb = &tb1->jmp_first;
738
            } else {
739
                ptb = &tb1->jmp_next[n1];
740
            }
741
        }
742
        /* now we can suppress tb(n) from the list */
743
        *ptb = tb->jmp_next[n];
744

    
745
        tb->jmp_next[n] = NULL;
746
    }
747
}
748

    
749
/* reset the jump entry 'n' of a TB so that it is not chained to
750
   another TB */
751
static inline void tb_reset_jump(TranslationBlock *tb, int n)
752
{
753
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
754
}
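/* Note: "resetting" a jump patches it to target the code that immediately
   follows it inside this TB (tc_ptr + tb_next_offset[n]), so execution falls
   through to the TB's own exit path instead of chaining directly into
   another TB. */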
755

    
756
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
757
{
758
    CPUState *env;
759
    PageDesc *p;
760
    unsigned int h, n1;
761
    target_phys_addr_t phys_pc;
762
    TranslationBlock *tb1, *tb2;
763

    
764
    /* remove the TB from the hash list */
765
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
766
    h = tb_phys_hash_func(phys_pc);
767
    tb_remove(&tb_phys_hash[h], tb,
768
              offsetof(TranslationBlock, phys_hash_next));
769

    
770
    /* remove the TB from the page list */
771
    if (tb->page_addr[0] != page_addr) {
772
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
773
        tb_page_remove(&p->first_tb, tb);
774
        invalidate_page_bitmap(p);
775
    }
776
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
777
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
778
        tb_page_remove(&p->first_tb, tb);
779
        invalidate_page_bitmap(p);
780
    }
781

    
782
    tb_invalidated_flag = 1;
783

    
784
    /* remove the TB from the hash list */
785
    h = tb_jmp_cache_hash_func(tb->pc);
786
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
787
        if (env->tb_jmp_cache[h] == tb)
788
            env->tb_jmp_cache[h] = NULL;
789
    }
790

    
791
    /* remove this TB from the two jump lists */
792
    tb_jmp_remove(tb, 0);
793
    tb_jmp_remove(tb, 1);
794

    
795
    /* suppress any remaining jumps to this TB */
796
    tb1 = tb->jmp_first;
797
    for(;;) {
798
        n1 = (long)tb1 & 3;
799
        if (n1 == 2)
800
            break;
801
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
802
        tb2 = tb1->jmp_next[n1];
803
        tb_reset_jump(tb1, n1);
804
        tb1->jmp_next[n1] = NULL;
805
        tb1 = tb2;
806
    }
807
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
808

    
809
    tb_phys_invalidate_count++;
810
}
811

    
812
static inline void set_bits(uint8_t *tab, int start, int len)
813
{
814
    int end, mask, end1;
815

    
816
    end = start + len;
817
    tab += start >> 3;
818
    mask = 0xff << (start & 7);
819
    if ((start & ~7) == (end & ~7)) {
820
        if (start < end) {
821
            mask &= ~(0xff << (end & 7));
822
            *tab |= mask;
823
        }
824
    } else {
825
        *tab++ |= mask;
826
        start = (start + 8) & ~7;
827
        end1 = end & ~7;
828
        while (start < end1) {
829
            *tab++ = 0xff;
830
            start += 8;
831
        }
832
        if (start < end) {
833
            mask = ~(0xff << (end & 7));
834
            *tab |= mask;
835
        }
836
    }
837
}
838

    
839
static void build_page_bitmap(PageDesc *p)
840
{
841
    int n, tb_start, tb_end;
842
    TranslationBlock *tb;
843

    
844
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
845

    
846
    tb = p->first_tb;
847
    while (tb != NULL) {
848
        n = (long)tb & 3;
849
        tb = (TranslationBlock *)((long)tb & ~3);
850
        /* NOTE: this is subtle as a TB may span two physical pages */
851
        if (n == 0) {
852
            /* NOTE: tb_end may be after the end of the page, but
853
               it is not a problem */
854
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
855
            tb_end = tb_start + tb->size;
856
            if (tb_end > TARGET_PAGE_SIZE)
857
                tb_end = TARGET_PAGE_SIZE;
858
        } else {
859
            tb_start = 0;
860
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
861
        }
862
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
863
        tb = tb->page_next[n];
864
    }
865
}
866

    
867
TranslationBlock *tb_gen_code(CPUState *env,
868
                              target_ulong pc, target_ulong cs_base,
869
                              int flags, int cflags)
870
{
871
    TranslationBlock *tb;
872
    uint8_t *tc_ptr;
873
    target_ulong phys_pc, phys_page2, virt_page2;
874
    int code_gen_size;
875

    
876
    phys_pc = get_phys_addr_code(env, pc);
877
    tb = tb_alloc(pc);
878
    if (!tb) {
879
        /* flush must be done */
880
        tb_flush(env);
881
        /* cannot fail at this point */
882
        tb = tb_alloc(pc);
883
        /* Don't forget to invalidate previous TB info.  */
884
        tb_invalidated_flag = 1;
885
    }
886
    tc_ptr = code_gen_ptr;
887
    tb->tc_ptr = tc_ptr;
888
    tb->cs_base = cs_base;
889
    tb->flags = flags;
890
    tb->cflags = cflags;
891
    cpu_gen_code(env, tb, &code_gen_size);
892
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
893

    
894
    /* check next page if needed */
895
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
896
    phys_page2 = -1;
897
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
898
        phys_page2 = get_phys_addr_code(env, virt_page2);
899
    }
900
    tb_link_phys(tb, phys_pc, phys_page2);
901
    return tb;
902
}
903

    
904
/* invalidate all TBs which intersect with the target physical page
905
   starting in the range [start, end). NOTE: start and end must refer to
906
   the same physical page. 'is_cpu_write_access' should be true if called
907
   from a real cpu write access: the virtual CPU will exit the current
908
   TB if code is modified inside this TB. */
909
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
910
                                   int is_cpu_write_access)
911
{
912
    TranslationBlock *tb, *tb_next, *saved_tb;
913
    CPUState *env = cpu_single_env;
914
    target_ulong tb_start, tb_end;
915
    PageDesc *p;
916
    int n;
917
#ifdef TARGET_HAS_PRECISE_SMC
918
    int current_tb_not_found = is_cpu_write_access;
919
    TranslationBlock *current_tb = NULL;
920
    int current_tb_modified = 0;
921
    target_ulong current_pc = 0;
922
    target_ulong current_cs_base = 0;
923
    int current_flags = 0;
924
#endif /* TARGET_HAS_PRECISE_SMC */
925

    
926
    p = page_find(start >> TARGET_PAGE_BITS);
927
    if (!p)
928
        return;
929
    if (!p->code_bitmap &&
930
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
931
        is_cpu_write_access) {
932
        /* build code bitmap */
933
        build_page_bitmap(p);
934
    }
935

    
936
    /* we remove all the TBs in the range [start, end[ */
937
    /* XXX: see if in some cases it could be faster to invalidate all the code */
938
    tb = p->first_tb;
939
    while (tb != NULL) {
940
        n = (long)tb & 3;
941
        tb = (TranslationBlock *)((long)tb & ~3);
942
        tb_next = tb->page_next[n];
943
        /* NOTE: this is subtle as a TB may span two physical pages */
944
        if (n == 0) {
945
            /* NOTE: tb_end may be after the end of the page, but
946
               it is not a problem */
947
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
948
            tb_end = tb_start + tb->size;
949
        } else {
950
            tb_start = tb->page_addr[1];
951
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
952
        }
953
        if (!(tb_end <= start || tb_start >= end)) {
954
#ifdef TARGET_HAS_PRECISE_SMC
955
            if (current_tb_not_found) {
956
                current_tb_not_found = 0;
957
                current_tb = NULL;
958
                if (env->mem_io_pc) {
959
                    /* now we have a real cpu fault */
960
                    current_tb = tb_find_pc(env->mem_io_pc);
961
                }
962
            }
963
            if (current_tb == tb &&
964
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
965
                /* If we are modifying the current TB, we must stop
966
                its execution. We could be more precise by checking
967
                that the modification is after the current PC, but it
968
                would require a specialized function to partially
969
                restore the CPU state */
970

    
971
                current_tb_modified = 1;
972
                cpu_restore_state(current_tb, env,
973
                                  env->mem_io_pc, NULL);
974
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
975
                                     &current_flags);
976
            }
977
#endif /* TARGET_HAS_PRECISE_SMC */
978
            /* we need to do that to handle the case where a signal
979
               occurs while doing tb_phys_invalidate() */
980
            saved_tb = NULL;
981
            if (env) {
982
                saved_tb = env->current_tb;
983
                env->current_tb = NULL;
984
            }
985
            tb_phys_invalidate(tb, -1);
986
            if (env) {
987
                env->current_tb = saved_tb;
988
                if (env->interrupt_request && env->current_tb)
989
                    cpu_interrupt(env, env->interrupt_request);
990
            }
991
        }
992
        tb = tb_next;
993
    }
994
#if !defined(CONFIG_USER_ONLY)
995
    /* if no code remaining, no need to continue to use slow writes */
996
    if (!p->first_tb) {
997
        invalidate_page_bitmap(p);
998
        if (is_cpu_write_access) {
999
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1000
        }
1001
    }
1002
#endif
1003
#ifdef TARGET_HAS_PRECISE_SMC
1004
    if (current_tb_modified) {
1005
        /* we generate a block containing just the instruction
1006
           modifying the memory. It will ensure that it cannot modify
1007
           itself */
1008
        env->current_tb = NULL;
1009
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1010
        cpu_resume_from_signal(env, NULL);
1011
    }
1012
#endif
1013
}
1014

    
1015
/* len must be <= 8 and start must be a multiple of len */
1016
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1017
{
1018
    PageDesc *p;
1019
    int offset, b;
1020
#if 0
1021
    if (1) {
1022
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1023
                  cpu_single_env->mem_io_vaddr, len,
1024
                  cpu_single_env->eip,
1025
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1026
    }
1027
#endif
1028
    p = page_find(start >> TARGET_PAGE_BITS);
1029
    if (!p)
1030
        return;
1031
    if (p->code_bitmap) {
1032
        offset = start & ~TARGET_PAGE_MASK;
1033
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1034
        if (b & ((1 << len) - 1))
1035
            goto do_invalidate;
1036
    } else {
1037
    do_invalidate:
1038
        tb_invalidate_phys_page_range(start, start + len, 1);
1039
    }
1040
}
1041

    
1042
#if !defined(CONFIG_SOFTMMU)
1043
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1044
                                    unsigned long pc, void *puc)
1045
{
1046
    TranslationBlock *tb;
1047
    PageDesc *p;
1048
    int n;
1049
#ifdef TARGET_HAS_PRECISE_SMC
1050
    TranslationBlock *current_tb = NULL;
1051
    CPUState *env = cpu_single_env;
1052
    int current_tb_modified = 0;
1053
    target_ulong current_pc = 0;
1054
    target_ulong current_cs_base = 0;
1055
    int current_flags = 0;
1056
#endif
1057

    
1058
    addr &= TARGET_PAGE_MASK;
1059
    p = page_find(addr >> TARGET_PAGE_BITS);
1060
    if (!p)
1061
        return;
1062
    tb = p->first_tb;
1063
#ifdef TARGET_HAS_PRECISE_SMC
1064
    if (tb && pc != 0) {
1065
        current_tb = tb_find_pc(pc);
1066
    }
1067
#endif
1068
    while (tb != NULL) {
1069
        n = (long)tb & 3;
1070
        tb = (TranslationBlock *)((long)tb & ~3);
1071
#ifdef TARGET_HAS_PRECISE_SMC
1072
        if (current_tb == tb &&
1073
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1074
                /* If we are modifying the current TB, we must stop
1075
                   its execution. We could be more precise by checking
1076
                   that the modification is after the current PC, but it
1077
                   would require a specialized function to partially
1078
                   restore the CPU state */
1079

    
1080
            current_tb_modified = 1;
1081
            cpu_restore_state(current_tb, env, pc, puc);
1082
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1083
                                 &current_flags);
1084
        }
1085
#endif /* TARGET_HAS_PRECISE_SMC */
1086
        tb_phys_invalidate(tb, addr);
1087
        tb = tb->page_next[n];
1088
    }
1089
    p->first_tb = NULL;
1090
#ifdef TARGET_HAS_PRECISE_SMC
1091
    if (current_tb_modified) {
1092
        /* we generate a block containing just the instruction
1093
           modifying the memory. It will ensure that it cannot modify
1094
           itself */
1095
        env->current_tb = NULL;
1096
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1097
        cpu_resume_from_signal(env, puc);
1098
    }
1099
#endif
1100
}
1101
#endif
1102

    
1103
/* add the tb in the target page and protect it if necessary */
1104
static inline void tb_alloc_page(TranslationBlock *tb,
1105
                                 unsigned int n, target_ulong page_addr)
1106
{
1107
    PageDesc *p;
1108
    TranslationBlock *last_first_tb;
1109

    
1110
    tb->page_addr[n] = page_addr;
1111
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1112
    tb->page_next[n] = p->first_tb;
1113
    last_first_tb = p->first_tb;
1114
    p->first_tb = (TranslationBlock *)((long)tb | n);
1115
    invalidate_page_bitmap(p);
1116

    
1117
#if defined(TARGET_HAS_SMC) || 1
1118

    
1119
#if defined(CONFIG_USER_ONLY)
1120
    if (p->flags & PAGE_WRITE) {
1121
        target_ulong addr;
1122
        PageDesc *p2;
1123
        int prot;
1124

    
1125
        /* force the host page to be non-writable (writes will have a
1126
           page fault + mprotect overhead) */
1127
        page_addr &= qemu_host_page_mask;
1128
        prot = 0;
1129
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1130
            addr += TARGET_PAGE_SIZE) {
1131

    
1132
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1133
            if (!p2)
1134
                continue;
1135
            prot |= p2->flags;
1136
            p2->flags &= ~PAGE_WRITE;
1137
            page_get_flags(addr);
1138
          }
1139
        mprotect(g2h(page_addr), qemu_host_page_size,
1140
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1141
#ifdef DEBUG_TB_INVALIDATE
1142
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1143
               page_addr);
1144
#endif
1145
    }
1146
#else
1147
    /* if some code is already present, then the pages are already
1148
       protected. So we handle the case where only the first TB is
1149
       allocated in a physical page */
1150
    if (!last_first_tb) {
1151
        tlb_protect_code(page_addr);
1152
    }
1153
#endif
1154

    
1155
#endif /* TARGET_HAS_SMC */
1156
}
1157

    
1158
/* Allocate a new translation block. Flush the translation buffer if
1159
   too many translation blocks or too much generated code. */
1160
TranslationBlock *tb_alloc(target_ulong pc)
1161
{
1162
    TranslationBlock *tb;
1163

    
1164
    if (nb_tbs >= code_gen_max_blocks ||
1165
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1166
        return NULL;
1167
    tb = &tbs[nb_tbs++];
1168
    tb->pc = pc;
1169
    tb->cflags = 0;
1170
    return tb;
1171
}
1172

    
1173
void tb_free(TranslationBlock *tb)
1174
{
1175
    /* In practice this is mostly used for single-use temporary TBs.
1176
       Ignore the hard cases and just back up if this TB happens to
1177
       be the last one generated.  */
1178
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1179
        code_gen_ptr = tb->tc_ptr;
1180
        nb_tbs--;
1181
    }
1182
}
1183

    
1184
/* add a new TB and link it to the physical page tables. phys_page2 is
1185
   (-1) to indicate that only one page contains the TB. */
1186
void tb_link_phys(TranslationBlock *tb,
1187
                  target_ulong phys_pc, target_ulong phys_page2)
1188
{
1189
    unsigned int h;
1190
    TranslationBlock **ptb;
1191

    
1192
    /* Grab the mmap lock to stop another thread invalidating this TB
1193
       before we are done.  */
1194
    mmap_lock();
1195
    /* add in the physical hash table */
1196
    h = tb_phys_hash_func(phys_pc);
1197
    ptb = &tb_phys_hash[h];
1198
    tb->phys_hash_next = *ptb;
1199
    *ptb = tb;
1200

    
1201
    /* add in the page list */
1202
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1203
    if (phys_page2 != -1)
1204
        tb_alloc_page(tb, 1, phys_page2);
1205
    else
1206
        tb->page_addr[1] = -1;
1207

    
1208
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1209
    tb->jmp_next[0] = NULL;
1210
    tb->jmp_next[1] = NULL;
1211

    
1212
    /* init original jump addresses */
1213
    if (tb->tb_next_offset[0] != 0xffff)
1214
        tb_reset_jump(tb, 0);
1215
    if (tb->tb_next_offset[1] != 0xffff)
1216
        tb_reset_jump(tb, 1);
1217

    
1218
#ifdef DEBUG_TB_CHECK
1219
    tb_page_check();
1220
#endif
1221
    mmap_unlock();
1222
}
1223

    
1224
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1225
   tb[1].tc_ptr. Return NULL if not found */
1226
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1227
{
1228
    int m_min, m_max, m;
1229
    unsigned long v;
1230
    TranslationBlock *tb;
1231

    
1232
    if (nb_tbs <= 0)
1233
        return NULL;
1234
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1235
        tc_ptr >= (unsigned long)code_gen_ptr)
1236
        return NULL;
1237
    /* binary search (cf Knuth) */
1238
    m_min = 0;
1239
    m_max = nb_tbs - 1;
1240
    while (m_min <= m_max) {
1241
        m = (m_min + m_max) >> 1;
1242
        tb = &tbs[m];
1243
        v = (unsigned long)tb->tc_ptr;
1244
        if (v == tc_ptr)
1245
            return tb;
1246
        else if (tc_ptr < v) {
1247
            m_max = m - 1;
1248
        } else {
1249
            m_min = m + 1;
1250
        }
1251
    }
1252
    return &tbs[m_max];
1253
}
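/* Note: the binary search relies on tbs[] being filled in increasing tc_ptr
   order (TBs are carved sequentially out of code_gen_buffer), so when the
   loop terminates tbs[m_max] is the last TB whose tc_ptr is <= tc_ptr, i.e.
   the block containing that host PC. */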
1254

    
1255
static void tb_reset_jump_recursive(TranslationBlock *tb);
1256

    
1257
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1258
{
1259
    TranslationBlock *tb1, *tb_next, **ptb;
1260
    unsigned int n1;
1261

    
1262
    tb1 = tb->jmp_next[n];
1263
    if (tb1 != NULL) {
1264
        /* find head of list */
1265
        for(;;) {
1266
            n1 = (long)tb1 & 3;
1267
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268
            if (n1 == 2)
1269
                break;
1270
            tb1 = tb1->jmp_next[n1];
1271
        }
1272
        /* we are now sure that tb jumps to tb1 */
1273
        tb_next = tb1;
1274

    
1275
        /* remove tb from the jmp_first list */
1276
        ptb = &tb_next->jmp_first;
1277
        for(;;) {
1278
            tb1 = *ptb;
1279
            n1 = (long)tb1 & 3;
1280
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281
            if (n1 == n && tb1 == tb)
1282
                break;
1283
            ptb = &tb1->jmp_next[n1];
1284
        }
1285
        *ptb = tb->jmp_next[n];
1286
        tb->jmp_next[n] = NULL;
1287

    
1288
        /* suppress the jump to next tb in generated code */
1289
        tb_reset_jump(tb, n);
1290

    
1291
        /* recursively reset the jumps of the TB we could have jumped to */
1292
        tb_reset_jump_recursive(tb_next);
1293
    }
1294
}
1295

    
1296
static void tb_reset_jump_recursive(TranslationBlock *tb)
1297
{
1298
    tb_reset_jump_recursive2(tb, 0);
1299
    tb_reset_jump_recursive2(tb, 1);
1300
}
1301

    
1302
#if defined(TARGET_HAS_ICE)
1303
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1304
{
1305
    target_phys_addr_t addr;
1306
    target_ulong pd;
1307
    ram_addr_t ram_addr;
1308
    PhysPageDesc *p;
1309

    
1310
    addr = cpu_get_phys_page_debug(env, pc);
1311
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1312
    if (!p) {
1313
        pd = IO_MEM_UNASSIGNED;
1314
    } else {
1315
        pd = p->phys_offset;
1316
    }
1317
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1318
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1319
}
1320
#endif
1321

    
1322
/* Add a watchpoint.  */
1323
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1324
                          int flags, CPUWatchpoint **watchpoint)
1325
{
1326
    target_ulong len_mask = ~(len - 1);
1327
    CPUWatchpoint *wp;
1328

    
1329
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1330
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1331
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1332
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1333
        return -EINVAL;
1334
    }
1335
    wp = qemu_malloc(sizeof(*wp));
1336

    
1337
    wp->vaddr = addr;
1338
    wp->len_mask = len_mask;
1339
    wp->flags = flags;
1340

    
1341
    /* keep all GDB-injected watchpoints in front */
1342
    if (flags & BP_GDB)
1343
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1344
    else
1345
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1346

    
1347
    tlb_flush_page(env, addr);
1348

    
1349
    if (watchpoint)
1350
        *watchpoint = wp;
1351
    return 0;
1352
}
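/* Example of the len_mask arithmetic above: len == 4 gives len_mask == ~3,
   so a watchpoint at 0x1000 covers 0x1000..0x1003; the power-of-two and
   alignment check guarantees (addr & ~len_mask) == 0 for every accepted
   watchpoint. */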
1353

    
1354
/* Remove a specific watchpoint.  */
1355
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1356
                          int flags)
1357
{
1358
    target_ulong len_mask = ~(len - 1);
1359
    CPUWatchpoint *wp;
1360

    
1361
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1362
        if (addr == wp->vaddr && len_mask == wp->len_mask
1363
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1364
            cpu_watchpoint_remove_by_ref(env, wp);
1365
            return 0;
1366
        }
1367
    }
1368
    return -ENOENT;
1369
}
1370

    
1371
/* Remove a specific watchpoint by reference.  */
1372
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1373
{
1374
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1375

    
1376
    tlb_flush_page(env, watchpoint->vaddr);
1377

    
1378
    qemu_free(watchpoint);
1379
}
1380

    
1381
/* Remove all matching watchpoints.  */
1382
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1383
{
1384
    CPUWatchpoint *wp, *next;
1385

    
1386
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1387
        if (wp->flags & mask)
1388
            cpu_watchpoint_remove_by_ref(env, wp);
1389
    }
1390
}
1391

    
1392
/* Add a breakpoint.  */
1393
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1394
                          CPUBreakpoint **breakpoint)
1395
{
1396
#if defined(TARGET_HAS_ICE)
1397
    CPUBreakpoint *bp;
1398

    
1399
    bp = qemu_malloc(sizeof(*bp));
1400

    
1401
    bp->pc = pc;
1402
    bp->flags = flags;
1403

    
1404
    /* keep all GDB-injected breakpoints in front */
1405
    if (flags & BP_GDB)
1406
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1407
    else
1408
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1409

    
1410
    breakpoint_invalidate(env, pc);
1411

    
1412
    if (breakpoint)
1413
        *breakpoint = bp;
1414
    return 0;
1415
#else
1416
    return -ENOSYS;
1417
#endif
1418
}
1419

    
1420
/* Remove a specific breakpoint.  */
1421
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1422
{
1423
#if defined(TARGET_HAS_ICE)
1424
    CPUBreakpoint *bp;
1425

    
1426
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1427
        if (bp->pc == pc && bp->flags == flags) {
1428
            cpu_breakpoint_remove_by_ref(env, bp);
1429
            return 0;
1430
        }
1431
    }
1432
    return -ENOENT;
1433
#else
1434
    return -ENOSYS;
1435
#endif
1436
}
1437

    
1438
/* Remove a specific breakpoint by reference.  */
1439
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1440
{
1441
#if defined(TARGET_HAS_ICE)
1442
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1443

    
1444
    breakpoint_invalidate(env, breakpoint->pc);
1445

    
1446
    qemu_free(breakpoint);
1447
#endif
1448
}
1449

    
1450
/* Remove all matching breakpoints. */
1451
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1452
{
1453
#if defined(TARGET_HAS_ICE)
1454
    CPUBreakpoint *bp, *next;
1455

    
1456
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1457
        if (bp->flags & mask)
1458
            cpu_breakpoint_remove_by_ref(env, bp);
1459
    }
1460
#endif
1461
}
1462

    
1463
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1464
   CPU loop after each instruction */
1465
void cpu_single_step(CPUState *env, int enabled)
1466
{
1467
#if defined(TARGET_HAS_ICE)
1468
    if (env->singlestep_enabled != enabled) {
1469
        env->singlestep_enabled = enabled;
1470
        if (kvm_enabled())
1471
            kvm_update_guest_debug(env, 0);
1472
        else {
1473
            /* must flush all the translated code to avoid inconsistencies */
1474
            /* XXX: only flush what is necessary */
1475
            tb_flush(env);
1476
        }
1477
    }
1478
#endif
1479
}
1480

    
1481
/* enable or disable low-level logging */
1482
void cpu_set_log(int log_flags)
1483
{
1484
    loglevel = log_flags;
1485
    if (loglevel && !logfile) {
1486
        logfile = fopen(logfilename, log_append ? "a" : "w");
1487
        if (!logfile) {
1488
            perror(logfilename);
1489
            _exit(1);
1490
        }
1491
#if !defined(CONFIG_SOFTMMU)
1492
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1493
        {
1494
            static char logfile_buf[4096];
1495
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1496
        }
1497
#else
1498
        setvbuf(logfile, NULL, _IOLBF, 0);
1499
#endif
1500
        log_append = 1;
1501
    }
1502
    if (!loglevel && logfile) {
1503
        fclose(logfile);
1504
        logfile = NULL;
1505
    }
1506
}
1507

    
1508
void cpu_set_log_filename(const char *filename)
1509
{
1510
    logfilename = strdup(filename);
1511
    if (logfile) {
1512
        fclose(logfile);
1513
        logfile = NULL;
1514
    }
1515
    cpu_set_log(loglevel);
1516
}
1517

    
1518
static void cpu_unlink_tb(CPUState *env)
1519
{
1520
#if defined(USE_NPTL)
1521
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1522
       problem and hope the cpu will stop of its own accord.  For userspace
1523
       emulation this often isn't actually as bad as it sounds.  Often
1524
       signals are used primarily to interrupt blocking syscalls.  */
1525
#else
1526
    TranslationBlock *tb;
1527
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1528

    
1529
    tb = env->current_tb;
1530
    /* if the cpu is currently executing code, we must unlink it and
1531
       all the potentially executing TBs */
1532
    if (tb && !testandset(&interrupt_lock)) {
1533
        env->current_tb = NULL;
1534
        tb_reset_jump_recursive(tb);
1535
        resetlock(&interrupt_lock);
1536
    }
1537
#endif
1538
}
1539

    
1540
/* mask must never be zero, except for A20 change call */
1541
void cpu_interrupt(CPUState *env, int mask)
1542
{
1543
    int old_mask;
1544

    
1545
    old_mask = env->interrupt_request;
1546
    env->interrupt_request |= mask;
1547

    
1548
#ifndef CONFIG_USER_ONLY
1549
    /*
1550
     * If called from iothread context, wake the target cpu in
1551
     * case it's halted.
1552
     */
1553
    if (!qemu_cpu_self(env)) {
1554
        qemu_cpu_kick(env);
1555
        return;
1556
    }
1557
#endif
1558

    
1559
    if (use_icount) {
1560
        env->icount_decr.u16.high = 0xffff;
1561
#ifndef CONFIG_USER_ONLY
1562
        if (!can_do_io(env)
1563
            && (mask & ~old_mask) != 0) {
1564
            cpu_abort(env, "Raised interrupt while not in I/O function");
1565
        }
1566
#endif
1567
    } else {
1568
        cpu_unlink_tb(env);
1569
    }
1570
}
1571

    
1572
void cpu_reset_interrupt(CPUState *env, int mask)
1573
{
1574
    env->interrupt_request &= ~mask;
1575
}
1576

    
1577
void cpu_exit(CPUState *env)
1578
{
1579
    env->exit_request = 1;
1580
    cpu_unlink_tb(env);
1581
}
1582

    
1583
const CPULogItem cpu_log_items[] = {
1584
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1585
      "show generated host assembly code for each compiled TB" },
1586
    { CPU_LOG_TB_IN_ASM, "in_asm",
1587
      "show target assembly code for each compiled TB" },
1588
    { CPU_LOG_TB_OP, "op",
1589
      "show micro ops for each compiled TB" },
1590
    { CPU_LOG_TB_OP_OPT, "op_opt",
1591
      "show micro ops "
1592
#ifdef TARGET_I386
1593
      "before eflags optimization and "
1594
#endif
1595
      "after liveness analysis" },
1596
    { CPU_LOG_INT, "int",
1597
      "show interrupts/exceptions in short format" },
1598
    { CPU_LOG_EXEC, "exec",
1599
      "show trace before each executed TB (lots of logs)" },
1600
    { CPU_LOG_TB_CPU, "cpu",
1601
      "show CPU state before block translation" },
1602
#ifdef TARGET_I386
1603
    { CPU_LOG_PCALL, "pcall",
1604
      "show protected mode far calls/returns/exceptions" },
1605
    { CPU_LOG_RESET, "cpu_reset",
1606
      "show CPU state before CPU resets" },
1607
#endif
1608
#ifdef DEBUG_IOPORT
1609
    { CPU_LOG_IOPORT, "ioport",
1610
      "show all i/o ports accesses" },
1611
#endif
1612
    { 0, NULL, NULL },
1613
};
1614

    
1615
static int cmp1(const char *s1, int n, const char *s2)
1616
{
1617
    if (strlen(s2) != n)
1618
        return 0;
1619
    return memcmp(s1, s2, n) == 0;
1620
}
1621

    
1622
/* takes a comma-separated list of log masks. Returns 0 on error. */
1623
int cpu_str_to_log_mask(const char *str)
1624
{
1625
    const CPULogItem *item;
1626
    int mask;
1627
    const char *p, *p1;
1628

    
1629
    p = str;
1630
    mask = 0;
1631
    for(;;) {
1632
        p1 = strchr(p, ',');
1633
        if (!p1)
1634
            p1 = p + strlen(p);
1635
        if (cmp1(p, p1 - p, "all")) {
1636
            for(item = cpu_log_items; item->mask != 0; item++) {
1637
                mask |= item->mask;
1638
            }
1639
        } else {
1640
            for(item = cpu_log_items; item->mask != 0; item++) {
1641
                if (cmp1(p, p1 - p, item->name))
1642
                    goto found;
1643
            }
1644
            return 0;
1645
        }
1646
    found:
1647
        mask |= item->mask;
1648
        if (*p1 != ',')
1649
            break;
1650
        p = p1 + 1;
1651
    }
1652
    return mask;
1653
}
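/* Usage example: cpu_str_to_log_mask("in_asm,exec") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, "all" ORs together every entry of
   cpu_log_items, and any unrecognized name makes the whole call return 0. */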
1654

    
1655
void cpu_abort(CPUState *env, const char *fmt, ...)
1656
{
1657
    va_list ap;
1658
    va_list ap2;
1659

    
1660
    va_start(ap, fmt);
1661
    va_copy(ap2, ap);
1662
    fprintf(stderr, "qemu: fatal: ");
1663
    vfprintf(stderr, fmt, ap);
1664
    fprintf(stderr, "\n");
1665
#ifdef TARGET_I386
1666
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1667
#else
1668
    cpu_dump_state(env, stderr, fprintf, 0);
1669
#endif
1670
    if (qemu_log_enabled()) {
1671
        qemu_log("qemu: fatal: ");
1672
        qemu_log_vprintf(fmt, ap2);
1673
        qemu_log("\n");
1674
#ifdef TARGET_I386
1675
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1676
#else
1677
        log_cpu_state(env, 0);
1678
#endif
1679
        qemu_log_flush();
1680
        qemu_log_close();
1681
    }
1682
    va_end(ap2);
1683
    va_end(ap);
1684
    abort();
1685
}
1686

    
1687
CPUState *cpu_copy(CPUState *env)
1688
{
1689
    CPUState *new_env = cpu_init(env->cpu_model_str);
1690
    CPUState *next_cpu = new_env->next_cpu;
1691
    int cpu_index = new_env->cpu_index;
1692
#if defined(TARGET_HAS_ICE)
1693
    CPUBreakpoint *bp;
1694
    CPUWatchpoint *wp;
1695
#endif
1696

    
1697
    memcpy(new_env, env, sizeof(CPUState));
1698

    
1699
    /* Preserve chaining and index. */
1700
    new_env->next_cpu = next_cpu;
1701
    new_env->cpu_index = cpu_index;
1702

    
1703
    /* Clone all break/watchpoints.
1704
       Note: Once we support ptrace with hw-debug register access, make sure
1705
       BP_CPU break/watchpoints are handled correctly on clone. */
1706
    TAILQ_INIT(&env->breakpoints);
1707
    TAILQ_INIT(&env->watchpoints);
1708
#if defined(TARGET_HAS_ICE)
1709
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1710
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1711
    }
1712
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1713
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1714
                              wp->flags, NULL);
1715
    }
1716
#endif
1717

    
1718
    return new_env;
1719
}
1720

    
1721
#if !defined(CONFIG_USER_ONLY)
1722

    
1723
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1724
{
1725
    unsigned int i;
1726

    
1727
    /* Discard jump cache entries for any tb which might potentially
1728
       overlap the flushed page.  */
1729
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1730
    memset (&env->tb_jmp_cache[i], 0, 
1731
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1732

    
1733
    i = tb_jmp_cache_hash_page(addr);
1734
    memset (&env->tb_jmp_cache[i], 0, 
1735
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1736
}
1737

    
1738
/* NOTE: if flush_global is true, also flush global entries (not
1739
   implemented yet) */
1740
void tlb_flush(CPUState *env, int flush_global)
1741
{
1742
    int i;
1743

    
1744
#if defined(DEBUG_TLB)
1745
    printf("tlb_flush:\n");
1746
#endif
1747
    /* must reset current TB so that interrupts cannot modify the
1748
       links while we are modifying them */
1749
    env->current_tb = NULL;
1750

    
1751
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1752
        env->tlb_table[0][i].addr_read = -1;
1753
        env->tlb_table[0][i].addr_write = -1;
1754
        env->tlb_table[0][i].addr_code = -1;
1755
        env->tlb_table[1][i].addr_read = -1;
1756
        env->tlb_table[1][i].addr_write = -1;
1757
        env->tlb_table[1][i].addr_code = -1;
1758
#if (NB_MMU_MODES >= 3)
1759
        env->tlb_table[2][i].addr_read = -1;
1760
        env->tlb_table[2][i].addr_write = -1;
1761
        env->tlb_table[2][i].addr_code = -1;
1762
#endif
1763
#if (NB_MMU_MODES >= 4)
1764
        env->tlb_table[3][i].addr_read = -1;
1765
        env->tlb_table[3][i].addr_write = -1;
1766
        env->tlb_table[3][i].addr_code = -1;
1767
#endif
1768
#if (NB_MMU_MODES >= 5)
1769
        env->tlb_table[4][i].addr_read = -1;
1770
        env->tlb_table[4][i].addr_write = -1;
1771
        env->tlb_table[4][i].addr_code = -1;
1772
#endif
1773

    
1774
    }
1775

    
1776
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1777

    
1778
#ifdef CONFIG_KQEMU
1779
    if (env->kqemu_enabled) {
1780
        kqemu_flush(env, flush_global);
1781
    }
1782
#endif
1783
    tlb_flush_count++;
1784
}
1785

    
1786
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1787
{
1788
    if (addr == (tlb_entry->addr_read &
1789
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1790
        addr == (tlb_entry->addr_write &
1791
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1792
        addr == (tlb_entry->addr_code &
1793
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1794
        tlb_entry->addr_read = -1;
1795
        tlb_entry->addr_write = -1;
1796
        tlb_entry->addr_code = -1;
1797
    }
1798
}
1799

    
1800
void tlb_flush_page(CPUState *env, target_ulong addr)
1801
{
1802
    int i;
1803

    
1804
#if defined(DEBUG_TLB)
1805
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1806
#endif
1807
    /* must reset current TB so that interrupts cannot modify the
1808
       links while we are modifying them */
1809
    env->current_tb = NULL;
1810

    
1811
    addr &= TARGET_PAGE_MASK;
1812
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1813
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1814
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1815
#if (NB_MMU_MODES >= 3)
1816
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1817
#endif
1818
#if (NB_MMU_MODES >= 4)
1819
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1820
#endif
1821
#if (NB_MMU_MODES >= 5)
1822
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1823
#endif
1824

    
1825
    tlb_flush_jmp_cache(env, addr);
1826

    
1827
#ifdef CONFIG_KQEMU
1828
    if (env->kqemu_enabled) {
1829
        kqemu_flush_page(env, addr);
1830
    }
1831
#endif
1832
}
1833

    
1834
/* update the TLBs so that writes to code in the virtual page 'addr'
1835
   can be detected */
1836
static void tlb_protect_code(ram_addr_t ram_addr)
1837
{
1838
    cpu_physical_memory_reset_dirty(ram_addr,
1839
                                    ram_addr + TARGET_PAGE_SIZE,
1840
                                    CODE_DIRTY_FLAG);
1841
}
1842

    
1843
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1844
   tested for self-modifying code */
1845
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1846
                                    target_ulong vaddr)
1847
{
1848
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1849
}
1850

    
1851
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1852
                                         unsigned long start, unsigned long length)
1853
{
1854
    unsigned long addr;
1855
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1856
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1857
        if ((addr - start) < length) {
1858
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1859
        }
1860
    }
1861
}
1862

    
1863
/* Note: start and end must be within the same ram block.  */
1864
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1865
                                     int dirty_flags)
1866
{
1867
    CPUState *env;
1868
    unsigned long length, start1;
1869
    int i, mask, len;
1870
    uint8_t *p;
1871

    
1872
    start &= TARGET_PAGE_MASK;
1873
    end = TARGET_PAGE_ALIGN(end);
1874

    
1875
    length = end - start;
1876
    if (length == 0)
1877
        return;
1878
    len = length >> TARGET_PAGE_BITS;
1879
#ifdef CONFIG_KQEMU
1880
    /* XXX: should not depend on cpu context */
1881
    env = first_cpu;
1882
    if (env->kqemu_enabled) {
1883
        ram_addr_t addr;
1884
        addr = start;
1885
        for(i = 0; i < len; i++) {
1886
            kqemu_set_notdirty(env, addr);
1887
            addr += TARGET_PAGE_SIZE;
1888
        }
1889
    }
1890
#endif
1891
    mask = ~dirty_flags;
1892
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1893
    for(i = 0; i < len; i++)
1894
        p[i] &= mask;
1895

    
1896
    /* we modify the TLB cache so that the dirty bit will be set again
1897
       when accessing the range */
1898
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1899
    /* Check that we don't span multiple blocks - this breaks the
1900
       address comparisons below.  */
1901
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1902
            != (end - 1) - start) {
1903
        abort();
1904
    }
1905

    
1906
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1907
        for(i = 0; i < CPU_TLB_SIZE; i++)
1908
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1909
        for(i = 0; i < CPU_TLB_SIZE; i++)
1910
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1911
#if (NB_MMU_MODES >= 3)
1912
        for(i = 0; i < CPU_TLB_SIZE; i++)
1913
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1914
#endif
1915
#if (NB_MMU_MODES >= 4)
1916
        for(i = 0; i < CPU_TLB_SIZE; i++)
1917
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1918
#endif
1919
#if (NB_MMU_MODES >= 5)
1920
        for(i = 0; i < CPU_TLB_SIZE; i++)
1921
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1922
#endif
1923
    }
1924
}
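/* Illustrative example (not part of the original source): a display device
   typically clears only its own dirty bit for the frame buffer after a
   screen refresh, so the next guest write dirties the page again:
       cpu_physical_memory_reset_dirty(fb_start, fb_start + fb_size,
                                       VGA_DIRTY_FLAG);
   fb_start/fb_size are placeholders; the whole range must stay within one
   ram block, as noted above. */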
1925

    
1926
int cpu_physical_memory_set_dirty_tracking(int enable)
1927
{
1928
    in_migration = enable;
1929
    if (kvm_enabled()) {
1930
        return kvm_set_migration_log(enable);
1931
    }
1932
    return 0;
1933
}
1934

    
1935
int cpu_physical_memory_get_dirty_tracking(void)
1936
{
1937
    return in_migration;
1938
}
1939

    
1940
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1941
                                   target_phys_addr_t end_addr)
1942
{
1943
    int ret = 0;
1944

    
1945
    if (kvm_enabled())
1946
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1947
    return ret;
1948
}
1949

    
1950
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1951
{
1952
    ram_addr_t ram_addr;
1953
    void *p;
1954

    
1955
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1956
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1957
            + tlb_entry->addend);
1958
        ram_addr = qemu_ram_addr_from_host(p);
1959
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1960
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1961
        }
1962
    }
1963
}
1964

    
1965
/* update the TLB according to the current state of the dirty bits */
1966
void cpu_tlb_update_dirty(CPUState *env)
1967
{
1968
    int i;
1969
    for(i = 0; i < CPU_TLB_SIZE; i++)
1970
        tlb_update_dirty(&env->tlb_table[0][i]);
1971
    for(i = 0; i < CPU_TLB_SIZE; i++)
1972
        tlb_update_dirty(&env->tlb_table[1][i]);
1973
#if (NB_MMU_MODES >= 3)
1974
    for(i = 0; i < CPU_TLB_SIZE; i++)
1975
        tlb_update_dirty(&env->tlb_table[2][i]);
1976
#endif
1977
#if (NB_MMU_MODES >= 4)
1978
    for(i = 0; i < CPU_TLB_SIZE; i++)
1979
        tlb_update_dirty(&env->tlb_table[3][i]);
1980
#endif
1981
#if (NB_MMU_MODES >= 5)
1982
    for(i = 0; i < CPU_TLB_SIZE; i++)
1983
        tlb_update_dirty(&env->tlb_table[4][i]);
1984
#endif
1985
}
1986

    
1987
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1988
{
1989
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1990
        tlb_entry->addr_write = vaddr;
1991
}
1992

    
1993
/* update the TLB corresponding to virtual page vaddr
1994
   so that it is no longer dirty */
1995
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1996
{
1997
    int i;
1998

    
1999
    vaddr &= TARGET_PAGE_MASK;
2000
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2001
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2002
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2003
#if (NB_MMU_MODES >= 3)
2004
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2005
#endif
2006
#if (NB_MMU_MODES >= 4)
2007
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2008
#endif
2009
#if (NB_MMU_MODES >= 5)
2010
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2011
#endif
2012
}
2013

    
2014
/* add a new TLB entry. At most one entry for a given virtual address
2015
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2016
   (can only happen in non SOFTMMU mode for I/O pages or pages
2017
   conflicting with the host address space). */
2018
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2019
                      target_phys_addr_t paddr, int prot,
2020
                      int mmu_idx, int is_softmmu)
2021
{
2022
    PhysPageDesc *p;
2023
    unsigned long pd;
2024
    unsigned int index;
2025
    target_ulong address;
2026
    target_ulong code_address;
2027
    target_phys_addr_t addend;
2028
    int ret;
2029
    CPUTLBEntry *te;
2030
    CPUWatchpoint *wp;
2031
    target_phys_addr_t iotlb;
2032

    
2033
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2034
    if (!p) {
2035
        pd = IO_MEM_UNASSIGNED;
2036
    } else {
2037
        pd = p->phys_offset;
2038
    }
2039
#if defined(DEBUG_TLB)
2040
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2041
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2042
#endif
2043

    
2044
    ret = 0;
2045
    address = vaddr;
2046
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2047
        /* IO memory case (romd handled later) */
2048
        address |= TLB_MMIO;
2049
    }
2050
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2051
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2052
        /* Normal RAM.  */
2053
        iotlb = pd & TARGET_PAGE_MASK;
2054
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2055
            iotlb |= IO_MEM_NOTDIRTY;
2056
        else
2057
            iotlb |= IO_MEM_ROM;
2058
    } else {
2059
        /* IO handlers are currently passed a physical address.
2060
           It would be nice to pass an offset from the base address
2061
           of that region.  This would avoid having to special case RAM,
2062
           and avoid full address decoding in every device.
2063
           We can't use the high bits of pd for this because
2064
           IO_MEM_ROMD uses these as a ram address.  */
2065
        iotlb = (pd & ~TARGET_PAGE_MASK);
2066
        if (p) {
2067
            iotlb += p->region_offset;
2068
        } else {
2069
            iotlb += paddr;
2070
        }
2071
    }
2072

    
2073
    code_address = address;
2074
    /* Make accesses to pages with watchpoints go via the
2075
       watchpoint trap routines.  */
2076
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2077
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2078
            iotlb = io_mem_watch + paddr;
2079
            /* TODO: The memory case can be optimized by not trapping
2080
               reads of pages with a write breakpoint.  */
2081
            address |= TLB_MMIO;
2082
        }
2083
    }
2084

    
2085
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2086
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2087
    te = &env->tlb_table[mmu_idx][index];
2088
    te->addend = addend - vaddr;
2089
    if (prot & PAGE_READ) {
2090
        te->addr_read = address;
2091
    } else {
2092
        te->addr_read = -1;
2093
    }
2094

    
2095
    if (prot & PAGE_EXEC) {
2096
        te->addr_code = code_address;
2097
    } else {
2098
        te->addr_code = -1;
2099
    }
2100
    if (prot & PAGE_WRITE) {
2101
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2102
            (pd & IO_MEM_ROMD)) {
2103
            /* Write access calls the I/O callback.  */
2104
            te->addr_write = address | TLB_MMIO;
2105
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2106
                   !cpu_physical_memory_is_dirty(pd)) {
2107
            te->addr_write = address | TLB_NOTDIRTY;
2108
        } else {
2109
            te->addr_write = address;
2110
        }
2111
    } else {
2112
        te->addr_write = -1;
2113
    }
2114
    return ret;
2115
}
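/* Illustrative sketch (not part of the original source): a target MMU fault
   handler would typically install a page mapping like this.  guest_vaddr,
   guest_paddr and mmu_idx are placeholders produced by the target-specific
   page table walk; they are not defined in this file. */
#if 0
static void example_install_mapping(CPUState *env, target_ulong guest_vaddr,
                                    target_phys_addr_t guest_paddr, int mmu_idx)
{
    /* map one page as readable, writable and executable; both addresses
       are truncated to a page boundary before being handed over */
    tlb_set_page_exec(env, guest_vaddr & TARGET_PAGE_MASK,
                      guest_paddr & TARGET_PAGE_MASK,
                      PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx, 1);
}
#endif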
2116

    
2117
#else
2118

    
2119
void tlb_flush(CPUState *env, int flush_global)
2120
{
2121
}
2122

    
2123
void tlb_flush_page(CPUState *env, target_ulong addr)
2124
{
2125
}
2126

    
2127
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2128
                      target_phys_addr_t paddr, int prot,
2129
                      int mmu_idx, int is_softmmu)
2130
{
2131
    return 0;
2132
}
2133

    
2134
/*
2135
 * Walks guest process memory "regions" one by one
2136
 * and calls callback function 'fn' for each region.
2137
 */
2138
int walk_memory_regions(void *priv,
2139
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2140
{
2141
    unsigned long start, end;
2142
    PageDesc *p = NULL;
2143
    int i, j, prot, prot1;
2144
    int rc = 0;
2145

    
2146
    start = end = -1;
2147
    prot = 0;
2148

    
2149
    for (i = 0; i <= L1_SIZE; i++) {
2150
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2151
        for (j = 0; j < L2_SIZE; j++) {
2152
            prot1 = (p == NULL) ? 0 : p[j].flags;
2153
            /*
2154
             * "region" is one continuous chunk of memory
2155
             * that has same protection flags set.
2156
             */
2157
            if (prot1 != prot) {
2158
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2159
                if (start != -1) {
2160
                    rc = (*fn)(priv, start, end, prot);
2161
                    /* callback can stop iteration by returning != 0 */
2162
                    if (rc != 0)
2163
                        return (rc);
2164
                }
2165
                if (prot1 != 0)
2166
                    start = end;
2167
                else
2168
                    start = -1;
2169
                prot = prot1;
2170
            }
2171
            if (p == NULL)
2172
                break;
2173
        }
2174
    }
2175
    return (rc);
2176
}
2177

    
2178
static int dump_region(void *priv, unsigned long start,
2179
    unsigned long end, unsigned long prot)
2180
{
2181
    FILE *f = (FILE *)priv;
2182

    
2183
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2184
        start, end, end - start,
2185
        ((prot & PAGE_READ) ? 'r' : '-'),
2186
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2187
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2188

    
2189
    return (0);
2190
}
2191

    
2192
/* dump memory mappings */
2193
void page_dump(FILE *f)
2194
{
2195
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2196
            "start", "end", "size", "prot");
2197
    walk_memory_regions(f, dump_region);
2198
}
2199

    
2200
int page_get_flags(target_ulong address)
2201
{
2202
    PageDesc *p;
2203

    
2204
    p = page_find(address >> TARGET_PAGE_BITS);
2205
    if (!p)
2206
        return 0;
2207
    return p->flags;
2208
}
2209

    
2210
/* modify the flags of a page and invalidate the code if
2211
   necessary. The flag PAGE_WRITE_ORG is set automatically
2212
   depending on PAGE_WRITE */
2213
void page_set_flags(target_ulong start, target_ulong end, int flags)
2214
{
2215
    PageDesc *p;
2216
    target_ulong addr;
2217

    
2218
    /* mmap_lock should already be held.  */
2219
    start = start & TARGET_PAGE_MASK;
2220
    end = TARGET_PAGE_ALIGN(end);
2221
    if (flags & PAGE_WRITE)
2222
        flags |= PAGE_WRITE_ORG;
2223
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2224
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2225
        /* We may be called for host regions that are outside guest
2226
           address space.  */
2227
        if (!p)
2228
            return;
2229
        /* if the write protection is set, then we invalidate the code
2230
           inside */
2231
        if (!(p->flags & PAGE_WRITE) &&
2232
            (flags & PAGE_WRITE) &&
2233
            p->first_tb) {
2234
            tb_invalidate_phys_page(addr, 0, NULL);
2235
        }
2236
        p->flags = flags;
2237
    }
2238
}
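/* Illustrative example (not part of the original source): user-mode mmap
   emulation would typically mark a fresh anonymous mapping like this, which
   also sets PAGE_WRITE_ORG because PAGE_WRITE is requested:
       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
   start and len are placeholders for the guest address and mapping size. */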
2239

    
2240
int page_check_range(target_ulong start, target_ulong len, int flags)
2241
{
2242
    PageDesc *p;
2243
    target_ulong end;
2244
    target_ulong addr;
2245

    
2246
    if (start + len < start)
2247
        /* we've wrapped around */
2248
        return -1;
2249

    
2250
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2251
    start = start & TARGET_PAGE_MASK;
2252

    
2253
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2254
        p = page_find(addr >> TARGET_PAGE_BITS);
2255
        if (!p)
2256
            return -1;
2257
        if (!(p->flags & PAGE_VALID))
2258
            return -1;
2259

    
2260
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2261
            return -1;
2262
        if (flags & PAGE_WRITE) {
2263
            if (!(p->flags & PAGE_WRITE_ORG))
2264
                return -1;
2265
            /* unprotect the page if it was put read-only because it
2266
               contains translated code */
2267
            if (!(p->flags & PAGE_WRITE)) {
2268
                if (!page_unprotect(addr, 0, NULL))
2269
                    return -1;
2270
            }
2271
            return 0;
2272
        }
2273
    }
2274
    return 0;
2275
}
2276

    
2277
/* called from signal handler: invalidate the code and unprotect the
2278
   page. Return TRUE if the fault was successfully handled. */
2279
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2280
{
2281
    unsigned int page_index, prot, pindex;
2282
    PageDesc *p, *p1;
2283
    target_ulong host_start, host_end, addr;
2284

    
2285
    /* Technically this isn't safe inside a signal handler.  However we
2286
       know this only ever happens in a synchronous SEGV handler, so in
2287
       practice it seems to be ok.  */
2288
    mmap_lock();
2289

    
2290
    host_start = address & qemu_host_page_mask;
2291
    page_index = host_start >> TARGET_PAGE_BITS;
2292
    p1 = page_find(page_index);
2293
    if (!p1) {
2294
        mmap_unlock();
2295
        return 0;
2296
    }
2297
    host_end = host_start + qemu_host_page_size;
2298
    p = p1;
2299
    prot = 0;
2300
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2301
        prot |= p->flags;
2302
        p++;
2303
    }
2304
    /* if the page was really writable, then we change its
2305
       protection back to writable */
2306
    if (prot & PAGE_WRITE_ORG) {
2307
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2308
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2309
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2310
                     (prot & PAGE_BITS) | PAGE_WRITE);
2311
            p1[pindex].flags |= PAGE_WRITE;
2312
            /* and since the content will be modified, we must invalidate
2313
               the corresponding translated code. */
2314
            tb_invalidate_phys_page(address, pc, puc);
2315
#ifdef DEBUG_TB_CHECK
2316
            tb_invalidate_check(address);
2317
#endif
2318
            mmap_unlock();
2319
            return 1;
2320
        }
2321
    }
2322
    mmap_unlock();
2323
    return 0;
2324
}
2325

    
2326
static inline void tlb_set_dirty(CPUState *env,
2327
                                 unsigned long addr, target_ulong vaddr)
2328
{
2329
}
2330
#endif /* defined(CONFIG_USER_ONLY) */
2331

    
2332
#if !defined(CONFIG_USER_ONLY)
2333

    
2334
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2335
                             ram_addr_t memory, ram_addr_t region_offset);
2336
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2337
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2338
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2339
                      need_subpage)                                     \
2340
    do {                                                                \
2341
        if (addr > start_addr)                                          \
2342
            start_addr2 = 0;                                            \
2343
        else {                                                          \
2344
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2345
            if (start_addr2 > 0)                                        \
2346
                need_subpage = 1;                                       \
2347
        }                                                               \
2348
                                                                        \
2349
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2350
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2351
        else {                                                          \
2352
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2353
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2354
                need_subpage = 1;                                       \
2355
        }                                                               \
2356
    } while (0)
2357

    
2358
/* register physical memory. 'size' must be a multiple of the target
2359
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2360
   io memory page.  The address used when calling the IO function is
2361
   the offset from the start of the region, plus region_offset.  Both
2362
   start_addr and region_offset are rounded down to a page boundary
2363
   before calculating this offset.  This should not be a problem unless
2364
   the low bits of start_addr and region_offset differ.  */
2365
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2366
                                         ram_addr_t size,
2367
                                         ram_addr_t phys_offset,
2368
                                         ram_addr_t region_offset)
2369
{
2370
    target_phys_addr_t addr, end_addr;
2371
    PhysPageDesc *p;
2372
    CPUState *env;
2373
    ram_addr_t orig_size = size;
2374
    void *subpage;
2375

    
2376
#ifdef CONFIG_KQEMU
2377
    /* XXX: should not depend on cpu context */
2378
    env = first_cpu;
2379
    if (env->kqemu_enabled) {
2380
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2381
    }
2382
#endif
2383
    if (kvm_enabled())
2384
        kvm_set_phys_mem(start_addr, size, phys_offset);
2385

    
2386
    if (phys_offset == IO_MEM_UNASSIGNED) {
2387
        region_offset = start_addr;
2388
    }
2389
    region_offset &= TARGET_PAGE_MASK;
2390
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2391
    end_addr = start_addr + (target_phys_addr_t)size;
2392
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2393
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2394
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2395
            ram_addr_t orig_memory = p->phys_offset;
2396
            target_phys_addr_t start_addr2, end_addr2;
2397
            int need_subpage = 0;
2398

    
2399
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2400
                          need_subpage);
2401
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2402
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2403
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2404
                                           &p->phys_offset, orig_memory,
2405
                                           p->region_offset);
2406
                } else {
2407
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2408
                                            >> IO_MEM_SHIFT];
2409
                }
2410
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2411
                                 region_offset);
2412
                p->region_offset = 0;
2413
            } else {
2414
                p->phys_offset = phys_offset;
2415
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2416
                    (phys_offset & IO_MEM_ROMD))
2417
                    phys_offset += TARGET_PAGE_SIZE;
2418
            }
2419
        } else {
2420
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2421
            p->phys_offset = phys_offset;
2422
            p->region_offset = region_offset;
2423
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2424
                (phys_offset & IO_MEM_ROMD)) {
2425
                phys_offset += TARGET_PAGE_SIZE;
2426
            } else {
2427
                target_phys_addr_t start_addr2, end_addr2;
2428
                int need_subpage = 0;
2429

    
2430
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2431
                              end_addr2, need_subpage);
2432

    
2433
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2434
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2435
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2436
                                           addr & TARGET_PAGE_MASK);
2437
                    subpage_register(subpage, start_addr2, end_addr2,
2438
                                     phys_offset, region_offset);
2439
                    p->region_offset = 0;
2440
                }
2441
            }
2442
        }
2443
        region_offset += TARGET_PAGE_SIZE;
2444
    }
2445

    
2446
    /* since each CPU stores ram addresses in its TLB cache, we must
2447
       reset the modified entries */
2448
    /* XXX: slow ! */
2449
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2450
        tlb_flush(env, 1);
2451
    }
2452
}
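/* Illustrative sketch (not part of the original source): how board setup
   code typically uses this interface.  EXAMPLE_MMIO_BASE and example_io_index
   are placeholders; qemu_ram_alloc() and cpu_register_io_memory() are
   defined further below in this file. */
#if 0
static void example_map_memory(int example_io_index)
{
    /* back the first 8 MB of the guest physical space with RAM */
    ram_addr_t ram_offset = qemu_ram_alloc(0x800000);
    cpu_register_physical_memory_offset(0x00000000, 0x800000,
                                        ram_offset | IO_MEM_RAM, 0);

    /* map one page of device registers; accesses go through the
       registered read/write callbacks with offsets starting at 0 */
    cpu_register_physical_memory_offset(EXAMPLE_MMIO_BASE, TARGET_PAGE_SIZE,
                                        example_io_index, 0);
}
#endif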
2453

    
2454
/* XXX: temporary until new memory mapping API */
2455
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2456
{
2457
    PhysPageDesc *p;
2458

    
2459
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2460
    if (!p)
2461
        return IO_MEM_UNASSIGNED;
2462
    return p->phys_offset;
2463
}
2464

    
2465
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2466
{
2467
    if (kvm_enabled())
2468
        kvm_coalesce_mmio_region(addr, size);
2469
}
2470

    
2471
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2472
{
2473
    if (kvm_enabled())
2474
        kvm_uncoalesce_mmio_region(addr, size);
2475
}
2476

    
2477
#ifdef CONFIG_KQEMU
2478
/* XXX: better than nothing */
2479
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2480
{
2481
    ram_addr_t addr;
2482
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2483
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2484
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2485
        abort();
2486
    }
2487
    addr = last_ram_offset;
2488
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2489
    return addr;
2490
}
2491
#endif
2492

    
2493
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2494
{
2495
    RAMBlock *new_block;
2496

    
2497
#ifdef CONFIG_KQEMU
2498
    if (kqemu_phys_ram_base) {
2499
        return kqemu_ram_alloc(size);
2500
    }
2501
#endif
2502

    
2503
    size = TARGET_PAGE_ALIGN(size);
2504
    new_block = qemu_malloc(sizeof(*new_block));
2505

    
2506
    new_block->host = qemu_vmalloc(size);
2507
    new_block->offset = last_ram_offset;
2508
    new_block->length = size;
2509

    
2510
    new_block->next = ram_blocks;
2511
    ram_blocks = new_block;
2512

    
2513
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2514
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2515
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2516
           0xff, size >> TARGET_PAGE_BITS);
2517

    
2518
    last_ram_offset += size;
2519

    
2520
    if (kvm_enabled())
2521
        kvm_setup_guest_memory(new_block->host, size);
2522

    
2523
    return new_block->offset;
2524
}
2525

    
2526
void qemu_ram_free(ram_addr_t addr)
2527
{
2528
    /* TODO: implement this.  */
2529
}
2530

    
2531
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2532
   With the exception of the softmmu code in this file, this should
2533
   only be used for local memory (e.g. video ram) that the device owns,
2534
   and knows it isn't going to access beyond the end of the block.
2535

2536
   It should not be used for general purpose DMA.
2537
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2538
 */
2539
void *qemu_get_ram_ptr(ram_addr_t addr)
2540
{
2541
    RAMBlock *prev;
2542
    RAMBlock **prevp;
2543
    RAMBlock *block;
2544

    
2545
#ifdef CONFIG_KQEMU
2546
    if (kqemu_phys_ram_base) {
2547
        return kqemu_phys_ram_base + addr;
2548
    }
2549
#endif
2550

    
2551
    prev = NULL;
2552
    prevp = &ram_blocks;
2553
    block = ram_blocks;
2554
    while (block && (block->offset > addr
2555
                     || block->offset + block->length <= addr)) {
2556
        if (prev)
2557
          prevp = &prev->next;
2558
        prev = block;
2559
        block = block->next;
2560
    }
2561
    if (!block) {
2562
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2563
        abort();
2564
    }
2565
    /* Move this entry to the start of the list.  */
2566
    if (prev) {
2567
        prev->next = block->next;
2568
        block->next = *prevp;
2569
        *prevp = block;
2570
    }
2571
    return block->host + (addr - block->offset);
2572
}
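/* Illustrative example (not part of the original source): a display device
   that allocated its VRAM with qemu_ram_alloc() may touch the block
   directly, because it owns it and stays inside it:
       uint8_t *vram = qemu_get_ram_ptr(vram_offset);
       memset(vram, 0, vram_size);
   vram_offset/vram_size are placeholders; guest-driven DMA should use
   cpu_physical_memory_rw() or cpu_physical_memory_map() instead. */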
2573

    
2574
/* Some of the softmmu routines need to translate from a host pointer
2575
   (typically a TLB entry) back to a ram offset.  */
2576
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2577
{
2578
    RAMBlock *prev;
2579
    RAMBlock **prevp;
2580
    RAMBlock *block;
2581
    uint8_t *host = ptr;
2582

    
2583
#ifdef CONFIG_KQEMU
2584
    if (kqemu_phys_ram_base) {
2585
        return host - kqemu_phys_ram_base;
2586
    }
2587
#endif
2588

    
2589
    prev = NULL;
2590
    prevp = &ram_blocks;
2591
    block = ram_blocks;
2592
    while (block && (block->host > host
2593
                     || block->host + block->length <= host)) {
2594
        if (prev)
2595
          prevp = &prev->next;
2596
        prev = block;
2597
        block = block->next;
2598
    }
2599
    if (!block) {
2600
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2601
        abort();
2602
    }
2603
    return block->offset + (host - block->host);
2604
}
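/* Illustrative property (not part of the original source): for an offset
   inside a registered RAM block, the two translations are inverses, i.e.
       qemu_ram_addr_from_host(qemu_get_ram_ptr(addr)) == addr
   which is what the dirty-tracking code in tlb_update_dirty() relies on. */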
2605

    
2606
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2607
{
2608
#ifdef DEBUG_UNASSIGNED
2609
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2610
#endif
2611
#if defined(TARGET_SPARC)
2612
    do_unassigned_access(addr, 0, 0, 0, 1);
2613
#endif
2614
    return 0;
2615
}
2616

    
2617
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2618
{
2619
#ifdef DEBUG_UNASSIGNED
2620
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2621
#endif
2622
#if defined(TARGET_SPARC)
2623
    do_unassigned_access(addr, 0, 0, 0, 2);
2624
#endif
2625
    return 0;
2626
}
2627

    
2628
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2629
{
2630
#ifdef DEBUG_UNASSIGNED
2631
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2632
#endif
2633
#if defined(TARGET_SPARC)
2634
    do_unassigned_access(addr, 0, 0, 0, 4);
2635
#endif
2636
    return 0;
2637
}
2638

    
2639
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2640
{
2641
#ifdef DEBUG_UNASSIGNED
2642
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2643
#endif
2644
#if defined(TARGET_SPARC)
2645
    do_unassigned_access(addr, 1, 0, 0, 1);
2646
#endif
2647
}
2648

    
2649
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2650
{
2651
#ifdef DEBUG_UNASSIGNED
2652
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2653
#endif
2654
#if defined(TARGET_SPARC)
2655
    do_unassigned_access(addr, 1, 0, 0, 2);
2656
#endif
2657
}
2658

    
2659
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2660
{
2661
#ifdef DEBUG_UNASSIGNED
2662
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2663
#endif
2664
#if defined(TARGET_SPARC)
2665
    do_unassigned_access(addr, 1, 0, 0, 4);
2666
#endif
2667
}
2668

    
2669
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2670
    unassigned_mem_readb,
2671
    unassigned_mem_readw,
2672
    unassigned_mem_readl,
2673
};
2674

    
2675
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2676
    unassigned_mem_writeb,
2677
    unassigned_mem_writew,
2678
    unassigned_mem_writel,
2679
};
2680

    
2681
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2682
                                uint32_t val)
2683
{
2684
    int dirty_flags;
2685
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2686
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2687
#if !defined(CONFIG_USER_ONLY)
2688
        tb_invalidate_phys_page_fast(ram_addr, 1);
2689
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2690
#endif
2691
    }
2692
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2693
#ifdef CONFIG_KQEMU
2694
    if (cpu_single_env->kqemu_enabled &&
2695
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2696
        kqemu_modify_page(cpu_single_env, ram_addr);
2697
#endif
2698
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2699
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2700
    /* we remove the notdirty callback only if the code has been
2701
       flushed */
2702
    if (dirty_flags == 0xff)
2703
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2704
}
2705

    
2706
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2707
                                uint32_t val)
2708
{
2709
    int dirty_flags;
2710
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2711
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2712
#if !defined(CONFIG_USER_ONLY)
2713
        tb_invalidate_phys_page_fast(ram_addr, 2);
2714
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2715
#endif
2716
    }
2717
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2718
#ifdef CONFIG_KQEMU
2719
    if (cpu_single_env->kqemu_enabled &&
2720
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2721
        kqemu_modify_page(cpu_single_env, ram_addr);
2722
#endif
2723
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2724
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2725
    /* we remove the notdirty callback only if the code has been
2726
       flushed */
2727
    if (dirty_flags == 0xff)
2728
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2729
}
2730

    
2731
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2732
                                uint32_t val)
2733
{
2734
    int dirty_flags;
2735
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2736
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2737
#if !defined(CONFIG_USER_ONLY)
2738
        tb_invalidate_phys_page_fast(ram_addr, 4);
2739
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2740
#endif
2741
    }
2742
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2743
#ifdef CONFIG_KQEMU
2744
    if (cpu_single_env->kqemu_enabled &&
2745
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2746
        kqemu_modify_page(cpu_single_env, ram_addr);
2747
#endif
2748
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2749
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2750
    /* we remove the notdirty callback only if the code has been
2751
       flushed */
2752
    if (dirty_flags == 0xff)
2753
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2754
}
2755

    
2756
static CPUReadMemoryFunc *error_mem_read[3] = {
2757
    NULL, /* never used */
2758
    NULL, /* never used */
2759
    NULL, /* never used */
2760
};
2761

    
2762
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2763
    notdirty_mem_writeb,
2764
    notdirty_mem_writew,
2765
    notdirty_mem_writel,
2766
};
2767

    
2768
/* Generate a debug exception if a watchpoint has been hit.  */
2769
static void check_watchpoint(int offset, int len_mask, int flags)
2770
{
2771
    CPUState *env = cpu_single_env;
2772
    target_ulong pc, cs_base;
2773
    TranslationBlock *tb;
2774
    target_ulong vaddr;
2775
    CPUWatchpoint *wp;
2776
    int cpu_flags;
2777

    
2778
    if (env->watchpoint_hit) {
2779
        /* We re-entered the check after replacing the TB. Now raise
2780
         * the debug interrupt so that it will trigger after the
2781
         * current instruction. */
2782
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2783
        return;
2784
    }
2785
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2786
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2787
        if ((vaddr == (wp->vaddr & len_mask) ||
2788
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2789
            wp->flags |= BP_WATCHPOINT_HIT;
2790
            if (!env->watchpoint_hit) {
2791
                env->watchpoint_hit = wp;
2792
                tb = tb_find_pc(env->mem_io_pc);
2793
                if (!tb) {
2794
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2795
                              "pc=%p", (void *)env->mem_io_pc);
2796
                }
2797
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2798
                tb_phys_invalidate(tb, -1);
2799
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2800
                    env->exception_index = EXCP_DEBUG;
2801
                } else {
2802
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2803
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2804
                }
2805
                cpu_resume_from_signal(env, NULL);
2806
            }
2807
        } else {
2808
            wp->flags &= ~BP_WATCHPOINT_HIT;
2809
        }
2810
    }
2811
}
2812

    
2813
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2814
   so these check for a hit then pass through to the normal out-of-line
2815
   phys routines.  */
2816
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2817
{
2818
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2819
    return ldub_phys(addr);
2820
}
2821

    
2822
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2823
{
2824
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2825
    return lduw_phys(addr);
2826
}
2827

    
2828
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2829
{
2830
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2831
    return ldl_phys(addr);
2832
}
2833

    
2834
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2835
                             uint32_t val)
2836
{
2837
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2838
    stb_phys(addr, val);
2839
}
2840

    
2841
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2842
                             uint32_t val)
2843
{
2844
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2845
    stw_phys(addr, val);
2846
}
2847

    
2848
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2849
                             uint32_t val)
2850
{
2851
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2852
    stl_phys(addr, val);
2853
}
2854

    
2855
static CPUReadMemoryFunc *watch_mem_read[3] = {
2856
    watch_mem_readb,
2857
    watch_mem_readw,
2858
    watch_mem_readl,
2859
};
2860

    
2861
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2862
    watch_mem_writeb,
2863
    watch_mem_writew,
2864
    watch_mem_writel,
2865
};
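/* Illustrative flow (not part of the original source): when
   tlb_set_page_exec() above finds a watchpoint on a page, it installs
   io_mem_watch and sets TLB_MMIO, so a guest access to that page goes
       softmmu slow path -> watch_mem_readb/w/l -> check_watchpoint
   and, if no watchpoint actually matches the address, falls through to the
   plain ldub/lduw/ldl_phys access as if the page were ordinary memory. */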
2866

    
2867
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2868
                                 unsigned int len)
2869
{
2870
    uint32_t ret;
2871
    unsigned int idx;
2872

    
2873
    idx = SUBPAGE_IDX(addr);
2874
#if defined(DEBUG_SUBPAGE)
2875
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2876
           mmio, len, addr, idx);
2877
#endif
2878
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2879
                                       addr + mmio->region_offset[idx][0][len]);
2880

    
2881
    return ret;
2882
}
2883

    
2884
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2885
                              uint32_t value, unsigned int len)
2886
{
2887
    unsigned int idx;
2888

    
2889
    idx = SUBPAGE_IDX(addr);
2890
#if defined(DEBUG_SUBPAGE)
2891
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2892
           mmio, len, addr, idx, value);
2893
#endif
2894
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2895
                                  addr + mmio->region_offset[idx][1][len],
2896
                                  value);
2897
}
2898

    
2899
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2900
{
2901
#if defined(DEBUG_SUBPAGE)
2902
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2903
#endif
2904

    
2905
    return subpage_readlen(opaque, addr, 0);
2906
}
2907

    
2908
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2909
                            uint32_t value)
2910
{
2911
#if defined(DEBUG_SUBPAGE)
2912
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2913
#endif
2914
    subpage_writelen(opaque, addr, value, 0);
2915
}
2916

    
2917
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2918
{
2919
#if defined(DEBUG_SUBPAGE)
2920
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2921
#endif
2922

    
2923
    return subpage_readlen(opaque, addr, 1);
2924
}
2925

    
2926
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2927
                            uint32_t value)
2928
{
2929
#if defined(DEBUG_SUBPAGE)
2930
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2931
#endif
2932
    subpage_writelen(opaque, addr, value, 1);
2933
}
2934

    
2935
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2936
{
2937
#if defined(DEBUG_SUBPAGE)
2938
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2939
#endif
2940

    
2941
    return subpage_readlen(opaque, addr, 2);
2942
}
2943

    
2944
static void subpage_writel (void *opaque,
2945
                         target_phys_addr_t addr, uint32_t value)
2946
{
2947
#if defined(DEBUG_SUBPAGE)
2948
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2949
#endif
2950
    subpage_writelen(opaque, addr, value, 2);
2951
}
2952

    
2953
static CPUReadMemoryFunc *subpage_read[] = {
2954
    &subpage_readb,
2955
    &subpage_readw,
2956
    &subpage_readl,
2957
};
2958

    
2959
static CPUWriteMemoryFunc *subpage_write[] = {
2960
    &subpage_writeb,
2961
    &subpage_writew,
2962
    &subpage_writel,
2963
};
2964

    
2965
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2966
                             ram_addr_t memory, ram_addr_t region_offset)
2967
{
2968
    int idx, eidx;
2969
    unsigned int i;
2970

    
2971
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2972
        return -1;
2973
    idx = SUBPAGE_IDX(start);
2974
    eidx = SUBPAGE_IDX(end);
2975
#if defined(DEBUG_SUBPAGE)
2976
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2977
           mmio, start, end, idx, eidx, memory);
2978
#endif
2979
    memory >>= IO_MEM_SHIFT;
2980
    for (; idx <= eidx; idx++) {
2981
        for (i = 0; i < 4; i++) {
2982
            if (io_mem_read[memory][i]) {
2983
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2984
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2985
                mmio->region_offset[idx][0][i] = region_offset;
2986
            }
2987
            if (io_mem_write[memory][i]) {
2988
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2989
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2990
                mmio->region_offset[idx][1][i] = region_offset;
2991
            }
2992
        }
2993
    }
2994

    
2995
    return 0;
2996
}
2997

    
2998
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2999
                           ram_addr_t orig_memory, ram_addr_t region_offset)
3000
{
3001
    subpage_t *mmio;
3002
    int subpage_memory;
3003

    
3004
    mmio = qemu_mallocz(sizeof(subpage_t));
3005

    
3006
    mmio->base = base;
3007
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
3008
#if defined(DEBUG_SUBPAGE)
3009
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
3010
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
3011
#endif
3012
    *phys = subpage_memory | IO_MEM_SUBPAGE;
3013
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
3014
                         region_offset);
3015

    
3016
    return mmio;
3017
}
3018

    
3019
static int get_free_io_mem_idx(void)
3020
{
3021
    int i;
3022

    
3023
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3024
        if (!io_mem_used[i]) {
3025
            io_mem_used[i] = 1;
3026
            return i;
3027
        }
3028

    
3029
    return -1;
3030
}
3031

    
3032
static int cpu_register_io_memory_fixed(int io_index,
3033
                                        CPUReadMemoryFunc **mem_read,
3034
                                        CPUWriteMemoryFunc **mem_write,
3035
                                        void *opaque);
3036

    
3037
static void io_mem_init(void)
3038
{
3039
    int i;
3040

    
3041
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
3042
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
3043
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
3044
    for (i=0; i<5; i++)
3045
        io_mem_used[i] = 1;
3046

    
3047
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3048
                                          watch_mem_write, NULL);
3049
#ifdef CONFIG_KQEMU
3050
    if (kqemu_phys_ram_base) {
3051
        /* alloc dirty bits array */
3052
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3053
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3054
    }
3055
#endif
3056
}
3057

    
3058
/* mem_read and mem_write are arrays of functions containing the
3059
   function to access byte (index 0), word (index 1) and dword (index
3060
   2). Functions can be omitted with a NULL function pointer.
3061
   If io_index is non-zero, the corresponding io zone is
3063
   modified. If it is zero, a new io zone is allocated. The return
3064
   value can be used with cpu_register_physical_memory(). (-1) is
3065
   returned on error. */
3065
static int cpu_register_io_memory_fixed(int io_index,
3066
                                        CPUReadMemoryFunc **mem_read,
3067
                                        CPUWriteMemoryFunc **mem_write,
3068
                                        void *opaque)
3069
{
3070
    int i, subwidth = 0;
3071

    
3072
    if (io_index <= 0) {
3073
        io_index = get_free_io_mem_idx();
3074
        if (io_index == -1)
3075
            return io_index;
3076
    } else {
3077
        io_index >>= IO_MEM_SHIFT;
3078
        if (io_index >= IO_MEM_NB_ENTRIES)
3079
            return -1;
3080
    }
3081

    
3082
    for(i = 0;i < 3; i++) {
3083
        if (!mem_read[i] || !mem_write[i])
3084
            subwidth = IO_MEM_SUBWIDTH;
3085
        io_mem_read[io_index][i] = mem_read[i];
3086
        io_mem_write[io_index][i] = mem_write[i];
3087
    }
3088
    io_mem_opaque[io_index] = opaque;
3089
    return (io_index << IO_MEM_SHIFT) | subwidth;
3090
}
3091

    
3092
int cpu_register_io_memory(CPUReadMemoryFunc **mem_read,
3093
                           CPUWriteMemoryFunc **mem_write,
3094
                           void *opaque)
3095
{
3096
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
3097
}
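/* Illustrative sketch (not part of the original source): a device model
   normally provides one handler per access width and maps the returned
   index with cpu_register_physical_memory_offset().  The example_* handlers
   and dev_state are placeholders, not symbols defined in this file. */
#if 0
static CPUReadMemoryFunc *example_mmio_read[3] = {
    example_readb, example_readw, example_readl,
};
static CPUWriteMemoryFunc *example_mmio_write[3] = {
    example_writeb, example_writew, example_writel,
};

static void example_mmio_map(void *dev_state, target_phys_addr_t base)
{
    int io = cpu_register_io_memory(example_mmio_read, example_mmio_write,
                                    dev_state);
    cpu_register_physical_memory_offset(base, TARGET_PAGE_SIZE, io, 0);
}
#endif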
3098

    
3099
void cpu_unregister_io_memory(int io_table_address)
3100
{
3101
    int i;
3102
    int io_index = io_table_address >> IO_MEM_SHIFT;
3103

    
3104
    for (i=0;i < 3; i++) {
3105
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3106
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3107
    }
3108
    io_mem_opaque[io_index] = NULL;
3109
    io_mem_used[io_index] = 0;
3110
}
3111

    
3112
#endif /* !defined(CONFIG_USER_ONLY) */
3113

    
3114
/* physical memory access (slow version, mainly for debug) */
3115
#if defined(CONFIG_USER_ONLY)
3116
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3117
                            int len, int is_write)
3118
{
3119
    int l, flags;
3120
    target_ulong page;
3121
    void * p;
3122

    
3123
    while (len > 0) {
3124
        page = addr & TARGET_PAGE_MASK;
3125
        l = (page + TARGET_PAGE_SIZE) - addr;
3126
        if (l > len)
3127
            l = len;
3128
        flags = page_get_flags(page);
3129
        if (!(flags & PAGE_VALID))
3130
            return;
3131
        if (is_write) {
3132
            if (!(flags & PAGE_WRITE))
3133
                return;
3134
            /* XXX: this code should not depend on lock_user */
3135
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3136
                /* FIXME - should this return an error rather than just fail? */
3137
                return;
3138
            memcpy(p, buf, l);
3139
            unlock_user(p, addr, l);
3140
        } else {
3141
            if (!(flags & PAGE_READ))
3142
                return;
3143
            /* XXX: this code should not depend on lock_user */
3144
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3145
                /* FIXME - should this return an error rather than just fail? */
3146
                return;
3147
            memcpy(buf, p, l);
3148
            unlock_user(p, addr, 0);
3149
        }
3150
        len -= l;
3151
        buf += l;
3152
        addr += l;
3153
    }
3154
}
3155

    
3156
#else
3157
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

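/* Illustrative use from device code (the names are placeholders, not
   identifiers defined in this file): copy a completed 512-byte sector
   into guest memory at 'dma_addr', then read back a 4-byte status word:

       cpu_physical_memory_write(dma_addr, sector_buf, 512);
       cpu_physical_memory_read(status_addr, (uint8_t *)&status, 4);

   cpu_physical_memory_read()/write() are convenience wrappers around
   cpu_physical_memory_rw() with is_write fixed to 0 and 1. */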
/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

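/* Support for cpu_physical_memory_map(): when the target of a mapping is
   not RAM, the data is staged in a single, statically allocated bounce
   buffer.  Callers that fail to map because the buffer is busy can
   register a MapClient callback and will be notified once it is freed. */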
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

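/* Register a callback to be invoked once the bounce buffer becomes
   available again.  The returned handle can be passed to
   cpu_unregister_map_client() to cancel the registration. */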
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

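/* Typical (illustrative) zero-copy pattern built on the two functions
   above; 'gpa', 'size', 'dev' and 'retry_cb' are placeholders, not
   identifiers defined in this file:

       target_phys_addr_t len = size;
       void *host = cpu_physical_memory_map(gpa, &len, is_write);
       if (host) {
           ... transfer up to 'len' bytes directly through 'host' ...
           cpu_physical_memory_unmap(host, len, is_write, len);
       } else {
           cpu_register_map_client(dev, retry_cb);
       }
*/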
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

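/* The byte and 16-bit loads below simply go through the generic
   cpu_physical_memory_read() path; lduw_phys() additionally converts the
   result from target byte order with tswap16(). */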
/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

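/* 64-bit variant of stl_phys_notdirty(): addr must be aligned and the
   dirty bitmap is never updated.  On an MMIO page the store is issued as
   two 32-bit writes in target byte order. */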
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

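/* The remaining stores are implemented on top of the generic
   cpu_physical_memory_write() path; stw_phys() and stq_phys() first
   convert the value to target byte order with tswap16()/tswap64(). */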
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
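/* Walks the guest page tables with cpu_get_phys_page_debug() and then
   accesses physical memory page by page; in system emulation, writes use
   cpu_physical_memory_write_rom() so that e.g. the gdb stub can plant
   breakpoints in ROM.  Returns -1 if a page is unmapped, 0 on success. */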
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
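/* Called from the memory access helpers when an I/O access is made from
   the middle of a TB while running with icount: the offending TB is
   invalidated and regenerated with CF_LAST_IO and an instruction count
   that ends exactly at the I/O instruction, then execution is restarted
   so the access is replayed as the last instruction of the new TB. */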
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

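/* Dump translation buffer statistics (TB counts, generated code size,
   cross-page TBs, direct jump usage and flush counters) through the
   given fprintf-like callback, e.g. for the monitor's "info jit"
   command. */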
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

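/* Instantiate the soft-MMU code-access helpers (MMUSUFFIX _cmmu,
   SOFTMMU_CODE_ACCESS) for 8-, 16-, 32- and 64-bit accesses; these are
   used when fetching guest instructions for translation. */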
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif