exec.c @ b0a46a33
1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37
#include "qemu-common.h"
38
#include "tcg.h"
39
#include "hw/hw.h"
40
#include "osdep.h"
41
#include "kvm.h"
42
#if defined(CONFIG_USER_ONLY)
43
#include <qemu.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81
#endif
82

    
83
static TranslationBlock *tbs;
84
int code_gen_max_blocks;
85
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86
static int nb_tbs;
87
/* any access to the tbs or the page table must use this lock */
88
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89

    
90
#if defined(__arm__) || defined(__sparc_v9__)
91
/* The prologue must be reachable with a direct jump. ARM and Sparc64
92
 have limited branch ranges (possibly also PPC) so place it in a
93
 section close to the code segment. */
94
#define code_gen_section                                \
95
    __attribute__((__section__(".gen_code")))           \
96
    __attribute__((aligned (32)))
97
#else
98
#define code_gen_section                                \
99
    __attribute__((aligned (32)))
100
#endif
101

    
102
uint8_t code_gen_prologue[1024] code_gen_section;
103
static uint8_t *code_gen_buffer;
104
static unsigned long code_gen_buffer_size;
105
/* threshold to flush the translated code buffer */
106
static unsigned long code_gen_buffer_max_size;
107
uint8_t *code_gen_ptr;
108

    
109
#if !defined(CONFIG_USER_ONLY)
110
int phys_ram_fd;
111
uint8_t *phys_ram_dirty;
112
static int in_migration;
113

    
114
typedef struct RAMBlock {
115
    uint8_t *host;
116
    ram_addr_t offset;
117
    ram_addr_t length;
118
    struct RAMBlock *next;
119
} RAMBlock;
120

    
121
static RAMBlock *ram_blocks;
122
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123
   then we can no longer assume contiguous ram offsets, and external uses
124
   of this variable will break.  */
125
ram_addr_t last_ram_offset;
126
#endif
127

    
128
CPUState *first_cpu;
129
/* current CPU in the current thread. It is only valid inside
130
   cpu_exec() */
131
CPUState *cpu_single_env;
132
/* 0 = Do not count executed instructions.
133
   1 = Precise instruction counting.
134
   2 = Adaptive rate instruction counting.  */
135
int use_icount = 0;
136
/* Current instruction counter.  While executing translated code this may
137
   include some instructions that have not yet been executed.  */
138
int64_t qemu_icount;
139

    
140
typedef struct PageDesc {
141
    /* list of TBs intersecting this ram page */
142
    TranslationBlock *first_tb;
143
    /* in order to optimize self-modifying code, we count the number
144
       of code write accesses to a given page so that a bitmap can be used */
145
    unsigned int code_write_count;
146
    uint8_t *code_bitmap;
147
#if defined(CONFIG_USER_ONLY)
148
    unsigned long flags;
149
#endif
150
} PageDesc;
151

    
152
typedef struct PhysPageDesc {
153
    /* offset in host memory of the page + io_index in the low bits */
154
    ram_addr_t phys_offset;
155
    ram_addr_t region_offset;
156
} PhysPageDesc;
157

    
158
#define L2_BITS 10
159
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160
/* XXX: this is a temporary hack for alpha target.
161
 *      In the future, this is to be replaced by a multi-level table
162
 *      to actually be able to handle the complete 64-bit address space.
163
 */
164
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
165
#else
166
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
167
#endif
168

    
169
#define L1_SIZE (1 << L1_BITS)
170
#define L2_SIZE (1 << L2_BITS)
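/* Worked example of the split above, assuming 4 KiB target pages
   (TARGET_PAGE_BITS == 12) and the 32-bit layout: the page index is
   addr >> TARGET_PAGE_BITS; its upper L1_BITS select an l1_map[] entry and
   its lower L2_BITS select a PageDesc within the L2_SIZE block that entry
   points to.  E.g. address 0x12345678 -> page index 0x12345 ->
   l1_map[0x48], PageDesc slot 0x345. */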
171

    
172
unsigned long qemu_real_host_page_size;
173
unsigned long qemu_host_page_bits;
174
unsigned long qemu_host_page_size;
175
unsigned long qemu_host_page_mask;
176

    
177
/* XXX: for system emulation, it could just be an array */
178
static PageDesc *l1_map[L1_SIZE];
179
static PhysPageDesc **l1_phys_map;
180

    
181
#if !defined(CONFIG_USER_ONLY)
182
static void io_mem_init(void);
183

    
184
/* io memory support */
185
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188
static char io_mem_used[IO_MEM_NB_ENTRIES];
189
static int io_mem_watch;
190
#endif
191

    
192
/* log support */
193
static const char *logfilename = "/tmp/qemu.log";
194
FILE *logfile;
195
int loglevel;
196
static int log_append = 0;
197

    
198
/* statistics */
199
static int tlb_flush_count;
200
static int tb_flush_count;
201
static int tb_phys_invalidate_count;
202

    
203
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204
typedef struct subpage_t {
205
    target_phys_addr_t base;
206
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208
    void *opaque[TARGET_PAGE_SIZE][2][4];
209
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
210
} subpage_t;
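/* A subpage_t stands in for a target page that is not covered by a single
   uniform memory region: every byte offset within the page (see
   SUBPAGE_IDX) gets its own read/write handler slots and opaque pointers,
   further indexed by access size, so several devices can share one page of
   the physical address space. */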
211

    
212
#ifdef _WIN32
213
static void map_exec(void *addr, long size)
214
{
215
    DWORD old_protect;
216
    VirtualProtect(addr, size,
217
                   PAGE_EXECUTE_READWRITE, &old_protect);
218
    
219
}
220
#else
221
static void map_exec(void *addr, long size)
222
{
223
    unsigned long start, end, page_size;
224
    
225
    page_size = getpagesize();
226
    start = (unsigned long)addr;
227
    start &= ~(page_size - 1);
228
    
229
    end = (unsigned long)addr + size;
230
    end += page_size - 1;
231
    end &= ~(page_size - 1);
232
    
233
    mprotect((void *)start, end - start,
234
             PROT_READ | PROT_WRITE | PROT_EXEC);
235
}
236
#endif
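/* The POSIX variant of map_exec() rounds the requested range out to host
   page boundaries before changing protections; e.g. with 4 KiB host pages,
   addr = 0x1234 and size = 0x100 turn into an mprotect() of the single
   page starting at 0x1000. */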
237

    
238
static void page_init(void)
239
{
240
    /* NOTE: we can always suppose that qemu_host_page_size >=
241
       TARGET_PAGE_SIZE */
242
#ifdef _WIN32
243
    {
244
        SYSTEM_INFO system_info;
245

    
246
        GetSystemInfo(&system_info);
247
        qemu_real_host_page_size = system_info.dwPageSize;
248
    }
249
#else
250
    qemu_real_host_page_size = getpagesize();
251
#endif
252
    if (qemu_host_page_size == 0)
253
        qemu_host_page_size = qemu_real_host_page_size;
254
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
255
        qemu_host_page_size = TARGET_PAGE_SIZE;
256
    qemu_host_page_bits = 0;
257
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258
        qemu_host_page_bits++;
259
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
260
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262

    
263
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264
    {
265
        long long startaddr, endaddr;
266
        FILE *f;
267
        int n;
268

    
269
        mmap_lock();
270
        last_brk = (unsigned long)sbrk(0);
271
        f = fopen("/proc/self/maps", "r");
272
        if (f) {
273
            do {
274
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275
                if (n == 2) {
276
                    startaddr = MIN(startaddr,
277
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278
                    endaddr = MIN(endaddr,
279
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
281
                                   TARGET_PAGE_ALIGN(endaddr),
282
                                   PAGE_RESERVED); 
283
                }
284
            } while (!feof(f));
285
            fclose(f);
286
        }
287
        mmap_unlock();
288
    }
289
#endif
290
}
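/* For user-mode emulation on non-Windows hosts, the block above parses
   /proc/self/maps and flags every existing host mapping (clamped to the
   guest physical address range) as PAGE_RESERVED, so that guest mappings
   can be steered away from memory the host process already uses. */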
291

    
292
static inline PageDesc **page_l1_map(target_ulong index)
293
{
294
#if TARGET_LONG_BITS > 32
295
    /* Host memory outside guest VM.  For 32-bit targets we have already
296
       excluded high addresses.  */
297
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298
        return NULL;
299
#endif
300
    return &l1_map[index >> L2_BITS];
301
}
302

    
303
static inline PageDesc *page_find_alloc(target_ulong index)
304
{
305
    PageDesc **lp, *p;
306
    lp = page_l1_map(index);
307
    if (!lp)
308
        return NULL;
309

    
310
    p = *lp;
311
    if (!p) {
312
        /* allocate if not found */
313
#if defined(CONFIG_USER_ONLY)
314
        size_t len = sizeof(PageDesc) * L2_SIZE;
315
        /* Don't use qemu_malloc because it may recurse.  */
316
        p = mmap(0, len, PROT_READ | PROT_WRITE,
317
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318
        *lp = p;
319
        if (h2g_valid(p)) {
320
            unsigned long addr = h2g(p);
321
            page_set_flags(addr & TARGET_PAGE_MASK,
322
                           TARGET_PAGE_ALIGN(addr + len),
323
                           PAGE_RESERVED); 
324
        }
325
#else
326
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327
        *lp = p;
328
#endif
329
    }
330
    return p + (index & (L2_SIZE - 1));
331
}
332

    
333
static inline PageDesc *page_find(target_ulong index)
334
{
335
    PageDesc **lp, *p;
336
    lp = page_l1_map(index);
337
    if (!lp)
338
        return NULL;
339

    
340
    p = *lp;
341
    if (!p)
342
        return 0;
343
    return p + (index & (L2_SIZE - 1));
344
}
345

    
346
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347
{
348
    void **lp, **p;
349
    PhysPageDesc *pd;
350

    
351
    p = (void **)l1_phys_map;
352
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
353

    
354
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356
#endif
357
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358
    p = *lp;
359
    if (!p) {
360
        /* allocate if not found */
361
        if (!alloc)
362
            return NULL;
363
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364
        memset(p, 0, sizeof(void *) * L1_SIZE);
365
        *lp = p;
366
    }
367
#endif
368
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369
    pd = *lp;
370
    if (!pd) {
371
        int i;
372
        /* allocate if not found */
373
        if (!alloc)
374
            return NULL;
375
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376
        *lp = pd;
377
        for (i = 0; i < L2_SIZE; i++) {
378
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
379
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380
        }
381
    }
382
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383
}
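/* The physical page table mirrors the virtual one but is keyed by
   target_phys_addr_t and grows an extra level when
   TARGET_PHYS_ADDR_SPACE_BITS > 32.  E.g. with 36-bit physical addresses
   and 4 KiB pages, the 24-bit page index is consumed as 4 + 10 + 10 bits
   across the (up to) three lookup steps; intermediate tables are only
   allocated when 'alloc' is non-zero. */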
384

    
385
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386
{
387
    return phys_page_find_alloc(index, 0);
388
}
389

    
390
#if !defined(CONFIG_USER_ONLY)
391
static void tlb_protect_code(ram_addr_t ram_addr);
392
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393
                                    target_ulong vaddr);
394
#define mmap_lock() do { } while(0)
395
#define mmap_unlock() do { } while(0)
396
#endif
397

    
398
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399

    
400
#if defined(CONFIG_USER_ONLY)
401
/* Currently it is not recommended to allocate big chunks of data in
402
   user mode. It will change when a dedicated libc is used */
403
#define USE_STATIC_CODE_GEN_BUFFER
404
#endif
405

    
406
#ifdef USE_STATIC_CODE_GEN_BUFFER
407
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408
#endif
409

    
410
static void code_gen_alloc(unsigned long tb_size)
411
{
412
#ifdef USE_STATIC_CODE_GEN_BUFFER
413
    code_gen_buffer = static_code_gen_buffer;
414
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415
    map_exec(code_gen_buffer, code_gen_buffer_size);
416
#else
417
    code_gen_buffer_size = tb_size;
418
    if (code_gen_buffer_size == 0) {
419
#if defined(CONFIG_USER_ONLY)
420
        /* in user mode, phys_ram_size is not meaningful */
421
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422
#else
423
        /* XXX: needs adjustments */
424
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
425
#endif
426
    }
427
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429
    /* The code gen buffer location may have constraints depending on
430
       the host cpu and OS */
431
#if defined(__linux__) 
432
    {
433
        int flags;
434
        void *start = NULL;
435

    
436
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
437
#if defined(__x86_64__)
438
        flags |= MAP_32BIT;
439
        /* Cannot map more than that */
440
        if (code_gen_buffer_size > (800 * 1024 * 1024))
441
            code_gen_buffer_size = (800 * 1024 * 1024);
442
#elif defined(__sparc_v9__)
443
        // Map the buffer below 2G, so we can use direct calls and branches
444
        flags |= MAP_FIXED;
445
        start = (void *) 0x60000000UL;
446
        if (code_gen_buffer_size > (512 * 1024 * 1024))
447
            code_gen_buffer_size = (512 * 1024 * 1024);
448
#elif defined(__arm__)
449
        /* Map the buffer below 32M, so we can use direct calls and branches */
450
        flags |= MAP_FIXED;
451
        start = (void *) 0x01000000UL;
452
        if (code_gen_buffer_size > 16 * 1024 * 1024)
453
            code_gen_buffer_size = 16 * 1024 * 1024;
454
#endif
455
        code_gen_buffer = mmap(start, code_gen_buffer_size,
456
                               PROT_WRITE | PROT_READ | PROT_EXEC,
457
                               flags, -1, 0);
458
        if (code_gen_buffer == MAP_FAILED) {
459
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
460
            exit(1);
461
        }
462
    }
463
#elif defined(__FreeBSD__) || defined(__DragonFly__)
464
    {
465
        int flags;
466
        void *addr = NULL;
467
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
468
#if defined(__x86_64__)
469
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470
         * 0x40000000 is free */
471
        flags |= MAP_FIXED;
472
        addr = (void *)0x40000000;
473
        /* Cannot map more than that */
474
        if (code_gen_buffer_size > (800 * 1024 * 1024))
475
            code_gen_buffer_size = (800 * 1024 * 1024);
476
#endif
477
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
478
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
479
                               flags, -1, 0);
480
        if (code_gen_buffer == MAP_FAILED) {
481
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482
            exit(1);
483
        }
484
    }
485
#else
486
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
487
    map_exec(code_gen_buffer, code_gen_buffer_size);
488
#endif
489
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
490
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491
    code_gen_buffer_max_size = code_gen_buffer_size - 
492
        code_gen_max_block_size();
493
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
495
}
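/* Sizing note: code_gen_buffer_max_size leaves room for one worst-case
   block (code_gen_max_block_size()) at the end of the buffer, so a
   translation that starts below the threshold can always complete, and the
   TB descriptor array is dimensioned from the CODE_GEN_AVG_BLOCK_SIZE
   average. */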
496

    
497
/* Must be called before using the QEMU cpus. 'tb_size' is the size
498
   (in bytes) allocated to the translation buffer. Zero means default
499
   size. */
500
void cpu_exec_init_all(unsigned long tb_size)
501
{
502
    cpu_gen_init();
503
    code_gen_alloc(tb_size);
504
    code_gen_ptr = code_gen_buffer;
505
    page_init();
506
#if !defined(CONFIG_USER_ONLY)
507
    io_mem_init();
508
#endif
509
}
510

    
511
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512

    
513
#define CPU_COMMON_SAVE_VERSION 1
514

    
515
static void cpu_common_save(QEMUFile *f, void *opaque)
516
{
517
    CPUState *env = opaque;
518

    
519
    cpu_synchronize_state(env, 0);
520

    
521
    qemu_put_be32s(f, &env->halted);
522
    qemu_put_be32s(f, &env->interrupt_request);
523
}
524

    
525
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
526
{
527
    CPUState *env = opaque;
528

    
529
    if (version_id != CPU_COMMON_SAVE_VERSION)
530
        return -EINVAL;
531

    
532
    qemu_get_be32s(f, &env->halted);
533
    qemu_get_be32s(f, &env->interrupt_request);
534
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
535
       version_id is increased. */
536
    env->interrupt_request &= ~0x01;
537
    tlb_flush(env, 1);
538
    cpu_synchronize_state(env, 1);
539

    
540
    return 0;
541
}
542
#endif
543

    
544
void cpu_exec_init(CPUState *env)
545
{
546
    CPUState **penv;
547
    int cpu_index;
548

    
549
#if defined(CONFIG_USER_ONLY)
550
    cpu_list_lock();
551
#endif
552
    env->next_cpu = NULL;
553
    penv = &first_cpu;
554
    cpu_index = 0;
555
    while (*penv != NULL) {
556
        penv = (CPUState **)&(*penv)->next_cpu;
557
        cpu_index++;
558
    }
559
    env->cpu_index = cpu_index;
560
    env->numa_node = 0;
561
    TAILQ_INIT(&env->breakpoints);
562
    TAILQ_INIT(&env->watchpoints);
563
    *penv = env;
564
#if defined(CONFIG_USER_ONLY)
565
    cpu_list_unlock();
566
#endif
567
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
568
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
569
                    cpu_common_save, cpu_common_load, env);
570
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
571
                    cpu_save, cpu_load, env);
572
#endif
573
}
574

    
575
static inline void invalidate_page_bitmap(PageDesc *p)
576
{
577
    if (p->code_bitmap) {
578
        qemu_free(p->code_bitmap);
579
        p->code_bitmap = NULL;
580
    }
581
    p->code_write_count = 0;
582
}
583

    
584
/* set to NULL all the 'first_tb' fields in all PageDescs */
585
static void page_flush_tb(void)
586
{
587
    int i, j;
588
    PageDesc *p;
589

    
590
    for(i = 0; i < L1_SIZE; i++) {
591
        p = l1_map[i];
592
        if (p) {
593
            for(j = 0; j < L2_SIZE; j++) {
594
                p->first_tb = NULL;
595
                invalidate_page_bitmap(p);
596
                p++;
597
            }
598
        }
599
    }
600
}
601

    
602
/* flush all the translation blocks */
603
/* XXX: tb_flush is currently not thread safe */
604
void tb_flush(CPUState *env1)
605
{
606
    CPUState *env;
607
#if defined(DEBUG_FLUSH)
608
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
609
           (unsigned long)(code_gen_ptr - code_gen_buffer),
610
           nb_tbs, nb_tbs > 0 ?
611
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
612
#endif
613
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
614
        cpu_abort(env1, "Internal error: code buffer overflow\n");
615

    
616
    nb_tbs = 0;
617

    
618
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
619
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
620
    }
621

    
622
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
623
    page_flush_tb();
624

    
625
    code_gen_ptr = code_gen_buffer;
626
    /* XXX: flush processor icache at this point if cache flush is
627
       expensive */
628
    tb_flush_count++;
629
}
630

    
631
#ifdef DEBUG_TB_CHECK
632

    
633
static void tb_invalidate_check(target_ulong address)
634
{
635
    TranslationBlock *tb;
636
    int i;
637
    address &= TARGET_PAGE_MASK;
638
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
639
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
640
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
641
                  address >= tb->pc + tb->size)) {
642
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
643
                       address, (long)tb->pc, tb->size);
644
            }
645
        }
646
    }
647
}
648

    
649
/* verify that all the pages have correct rights for code */
650
static void tb_page_check(void)
651
{
652
    TranslationBlock *tb;
653
    int i, flags1, flags2;
654

    
655
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
656
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
657
            flags1 = page_get_flags(tb->pc);
658
            flags2 = page_get_flags(tb->pc + tb->size - 1);
659
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
660
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
661
                       (long)tb->pc, tb->size, flags1, flags2);
662
            }
663
        }
664
    }
665
}
666

    
667
static void tb_jmp_check(TranslationBlock *tb)
668
{
669
    TranslationBlock *tb1;
670
    unsigned int n1;
671

    
672
    /* walk the circular list of TBs jumping to this TB and check that it is well formed */
673
    tb1 = tb->jmp_first;
674
    for(;;) {
675
        n1 = (long)tb1 & 3;
676
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
677
        if (n1 == 2)
678
            break;
679
        tb1 = tb1->jmp_next[n1];
680
    }
681
    /* check end of list */
682
    if (tb1 != tb) {
683
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
684
    }
685
}
686

    
687
#endif
688

    
689
/* invalidate one TB */
690
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
691
                             int next_offset)
692
{
693
    TranslationBlock *tb1;
694
    for(;;) {
695
        tb1 = *ptb;
696
        if (tb1 == tb) {
697
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
698
            break;
699
        }
700
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
701
    }
702
}
703

    
704
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
705
{
706
    TranslationBlock *tb1;
707
    unsigned int n1;
708

    
709
    for(;;) {
710
        tb1 = *ptb;
711
        n1 = (long)tb1 & 3;
712
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
713
        if (tb1 == tb) {
714
            *ptb = tb1->page_next[n1];
715
            break;
716
        }
717
        ptb = &tb1->page_next[n1];
718
    }
719
}
720

    
721
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
722
{
723
    TranslationBlock *tb1, **ptb;
724
    unsigned int n1;
725

    
726
    ptb = &tb->jmp_next[n];
727
    tb1 = *ptb;
728
    if (tb1) {
729
        /* find tb(n) in circular list */
730
        for(;;) {
731
            tb1 = *ptb;
732
            n1 = (long)tb1 & 3;
733
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
734
            if (n1 == n && tb1 == tb)
735
                break;
736
            if (n1 == 2) {
737
                ptb = &tb1->jmp_first;
738
            } else {
739
                ptb = &tb1->jmp_next[n1];
740
            }
741
        }
742
        /* now we can suppress tb(n) from the list */
743
        *ptb = tb->jmp_next[n];
744

    
745
        tb->jmp_next[n] = NULL;
746
    }
747
}
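/* Pointer-tagging convention used by these lists: the low two bits of the
   values stored in page_next[], jmp_next[] and jmp_first encode which of
   the referencing TB's two page slots or jump slots the link belongs to
   (0 or 1), while the value 2 tags the owning TB itself and so doubles as
   the end marker of the circular jump list (cf.
   tb->jmp_first = (TranslationBlock *)((long)tb | 2) in tb_link_phys()). */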
748

    
749
/* reset the jump entry 'n' of a TB so that it is not chained to
750
   another TB */
751
static inline void tb_reset_jump(TranslationBlock *tb, int n)
752
{
753
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
754
}
755

    
756
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
757
{
758
    CPUState *env;
759
    PageDesc *p;
760
    unsigned int h, n1;
761
    target_phys_addr_t phys_pc;
762
    TranslationBlock *tb1, *tb2;
763

    
764
    /* remove the TB from the hash list */
765
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
766
    h = tb_phys_hash_func(phys_pc);
767
    tb_remove(&tb_phys_hash[h], tb,
768
              offsetof(TranslationBlock, phys_hash_next));
769

    
770
    /* remove the TB from the page list */
771
    if (tb->page_addr[0] != page_addr) {
772
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
773
        tb_page_remove(&p->first_tb, tb);
774
        invalidate_page_bitmap(p);
775
    }
776
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
777
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
778
        tb_page_remove(&p->first_tb, tb);
779
        invalidate_page_bitmap(p);
780
    }
781

    
782
    tb_invalidated_flag = 1;
783

    
784
    /* remove the TB from each CPU's jump cache */
785
    h = tb_jmp_cache_hash_func(tb->pc);
786
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
787
        if (env->tb_jmp_cache[h] == tb)
788
            env->tb_jmp_cache[h] = NULL;
789
    }
790

    
791
    /* suppress this TB from the two jump lists */
792
    tb_jmp_remove(tb, 0);
793
    tb_jmp_remove(tb, 1);
794

    
795
    /* suppress any remaining jumps to this TB */
796
    tb1 = tb->jmp_first;
797
    for(;;) {
798
        n1 = (long)tb1 & 3;
799
        if (n1 == 2)
800
            break;
801
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
802
        tb2 = tb1->jmp_next[n1];
803
        tb_reset_jump(tb1, n1);
804
        tb1->jmp_next[n1] = NULL;
805
        tb1 = tb2;
806
    }
807
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
808

    
809
    tb_phys_invalidate_count++;
810
}
811

    
812
static inline void set_bits(uint8_t *tab, int start, int len)
813
{
814
    int end, mask, end1;
815

    
816
    end = start + len;
817
    tab += start >> 3;
818
    mask = 0xff << (start & 7);
819
    if ((start & ~7) == (end & ~7)) {
820
        if (start < end) {
821
            mask &= ~(0xff << (end & 7));
822
            *tab |= mask;
823
        }
824
    } else {
825
        *tab++ |= mask;
826
        start = (start + 8) & ~7;
827
        end1 = end & ~7;
828
        while (start < end1) {
829
            *tab++ = 0xff;
830
            start += 8;
831
        }
832
        if (start < end) {
833
            mask = ~(0xff << (end & 7));
834
            *tab |= mask;
835
        }
836
    }
837
}
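/* set_bits() marks bits [start, start + len) in a little-endian bit array;
   e.g. set_bits(tab, 10, 4) ORs 0x3c into tab[1], i.e. bits 2..5 of the
   second byte, which are global bit positions 10..13. */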
838

    
839
static void build_page_bitmap(PageDesc *p)
840
{
841
    int n, tb_start, tb_end;
842
    TranslationBlock *tb;
843

    
844
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
845

    
846
    tb = p->first_tb;
847
    while (tb != NULL) {
848
        n = (long)tb & 3;
849
        tb = (TranslationBlock *)((long)tb & ~3);
850
        /* NOTE: this is subtle as a TB may span two physical pages */
851
        if (n == 0) {
852
            /* NOTE: tb_end may be after the end of the page, but
853
               it is not a problem */
854
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
855
            tb_end = tb_start + tb->size;
856
            if (tb_end > TARGET_PAGE_SIZE)
857
                tb_end = TARGET_PAGE_SIZE;
858
        } else {
859
            tb_start = 0;
860
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
861
        }
862
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
863
        tb = tb->page_next[n];
864
    }
865
}
866

    
867
TranslationBlock *tb_gen_code(CPUState *env,
868
                              target_ulong pc, target_ulong cs_base,
869
                              int flags, int cflags)
870
{
871
    TranslationBlock *tb;
872
    uint8_t *tc_ptr;
873
    target_ulong phys_pc, phys_page2, virt_page2;
874
    int code_gen_size;
875

    
876
    phys_pc = get_phys_addr_code(env, pc);
877
    tb = tb_alloc(pc);
878
    if (!tb) {
879
        /* flush must be done */
880
        tb_flush(env);
881
        /* cannot fail at this point */
882
        tb = tb_alloc(pc);
883
        /* Don't forget to invalidate previous TB info.  */
884
        tb_invalidated_flag = 1;
885
    }
886
    tc_ptr = code_gen_ptr;
887
    tb->tc_ptr = tc_ptr;
888
    tb->cs_base = cs_base;
889
    tb->flags = flags;
890
    tb->cflags = cflags;
891
    cpu_gen_code(env, tb, &code_gen_size);
892
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
893

    
894
    /* check next page if needed */
895
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
896
    phys_page2 = -1;
897
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
898
        phys_page2 = get_phys_addr_code(env, virt_page2);
899
    }
900
    tb_link_phys(tb, phys_pc, phys_page2);
901
    return tb;
902
}
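/* A translated block may straddle a target page boundary; in that case the
   physical address of the second page is recorded as phys_page2, so
   tb_link_phys() registers the TB on both pages and a write to either one
   invalidates it. */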
903

    
904
/* invalidate all TBs which intersect with the target physical address
905
   range [start, end). NOTE: start and end must refer to
906
   the same physical page. 'is_cpu_write_access' should be true if called
907
   from a real cpu write access: the virtual CPU will exit the current
908
   TB if code is modified inside this TB. */
909
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
910
                                   int is_cpu_write_access)
911
{
912
    TranslationBlock *tb, *tb_next, *saved_tb;
913
    CPUState *env = cpu_single_env;
914
    target_ulong tb_start, tb_end;
915
    PageDesc *p;
916
    int n;
917
#ifdef TARGET_HAS_PRECISE_SMC
918
    int current_tb_not_found = is_cpu_write_access;
919
    TranslationBlock *current_tb = NULL;
920
    int current_tb_modified = 0;
921
    target_ulong current_pc = 0;
922
    target_ulong current_cs_base = 0;
923
    int current_flags = 0;
924
#endif /* TARGET_HAS_PRECISE_SMC */
925

    
926
    p = page_find(start >> TARGET_PAGE_BITS);
927
    if (!p)
928
        return;
929
    if (!p->code_bitmap &&
930
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
931
        is_cpu_write_access) {
932
        /* build code bitmap */
933
        build_page_bitmap(p);
934
    }
935

    
936
    /* we remove all the TBs in the range [start, end) */
937
    /* XXX: see if in some cases it could be faster to invalidate all the code */
938
    tb = p->first_tb;
939
    while (tb != NULL) {
940
        n = (long)tb & 3;
941
        tb = (TranslationBlock *)((long)tb & ~3);
942
        tb_next = tb->page_next[n];
943
        /* NOTE: this is subtle as a TB may span two physical pages */
944
        if (n == 0) {
945
            /* NOTE: tb_end may be after the end of the page, but
946
               it is not a problem */
947
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
948
            tb_end = tb_start + tb->size;
949
        } else {
950
            tb_start = tb->page_addr[1];
951
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
952
        }
953
        if (!(tb_end <= start || tb_start >= end)) {
954
#ifdef TARGET_HAS_PRECISE_SMC
955
            if (current_tb_not_found) {
956
                current_tb_not_found = 0;
957
                current_tb = NULL;
958
                if (env->mem_io_pc) {
959
                    /* now we have a real cpu fault */
960
                    current_tb = tb_find_pc(env->mem_io_pc);
961
                }
962
            }
963
            if (current_tb == tb &&
964
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
965
                /* If we are modifying the current TB, we must stop
966
                its execution. We could be more precise by checking
967
                that the modification is after the current PC, but it
968
                would require a specialized function to partially
969
                restore the CPU state */
970

    
971
                current_tb_modified = 1;
972
                cpu_restore_state(current_tb, env,
973
                                  env->mem_io_pc, NULL);
974
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
975
                                     &current_flags);
976
            }
977
#endif /* TARGET_HAS_PRECISE_SMC */
978
            /* we need to do that to handle the case where a signal
979
               occurs while doing tb_phys_invalidate() */
980
            saved_tb = NULL;
981
            if (env) {
982
                saved_tb = env->current_tb;
983
                env->current_tb = NULL;
984
            }
985
            tb_phys_invalidate(tb, -1);
986
            if (env) {
987
                env->current_tb = saved_tb;
988
                if (env->interrupt_request && env->current_tb)
989
                    cpu_interrupt(env, env->interrupt_request);
990
            }
991
        }
992
        tb = tb_next;
993
    }
994
#if !defined(CONFIG_USER_ONLY)
995
    /* if no code remaining, no need to continue to use slow writes */
996
    if (!p->first_tb) {
997
        invalidate_page_bitmap(p);
998
        if (is_cpu_write_access) {
999
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1000
        }
1001
    }
1002
#endif
1003
#ifdef TARGET_HAS_PRECISE_SMC
1004
    if (current_tb_modified) {
1005
        /* we generate a block containing just the instruction
1006
           modifying the memory. It will ensure that it cannot modify
1007
           itself */
1008
        env->current_tb = NULL;
1009
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1010
        cpu_resume_from_signal(env, NULL);
1011
    }
1012
#endif
1013
}
1014

    
1015
/* len must be <= 8 and start must be a multiple of len */
1016
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1017
{
1018
    PageDesc *p;
1019
    int offset, b;
1020
#if 0
1021
    if (1) {
1022
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1023
                  cpu_single_env->mem_io_vaddr, len,
1024
                  cpu_single_env->eip,
1025
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1026
    }
1027
#endif
1028
    p = page_find(start >> TARGET_PAGE_BITS);
1029
    if (!p)
1030
        return;
1031
    if (p->code_bitmap) {
1032
        offset = start & ~TARGET_PAGE_MASK;
1033
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1034
        if (b & ((1 << len) - 1))
1035
            goto do_invalidate;
1036
    } else {
1037
    do_invalidate:
1038
        tb_invalidate_phys_page_range(start, start + len, 1);
1039
    }
1040
}
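/* Self-modifying-code handling in short: early writes to a page that
   contains translated code go through the slow invalidation path
   (tb_invalidate_phys_page_range above); once code_write_count exceeds
   SMC_BITMAP_USE_THRESHOLD a per-page code bitmap is built, and
   tb_invalidate_phys_page_fast() can then skip the invalidation entirely
   for writes that touch no translated bytes. */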
1041

    
1042
#if !defined(CONFIG_SOFTMMU)
1043
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1044
                                    unsigned long pc, void *puc)
1045
{
1046
    TranslationBlock *tb;
1047
    PageDesc *p;
1048
    int n;
1049
#ifdef TARGET_HAS_PRECISE_SMC
1050
    TranslationBlock *current_tb = NULL;
1051
    CPUState *env = cpu_single_env;
1052
    int current_tb_modified = 0;
1053
    target_ulong current_pc = 0;
1054
    target_ulong current_cs_base = 0;
1055
    int current_flags = 0;
1056
#endif
1057

    
1058
    addr &= TARGET_PAGE_MASK;
1059
    p = page_find(addr >> TARGET_PAGE_BITS);
1060
    if (!p)
1061
        return;
1062
    tb = p->first_tb;
1063
#ifdef TARGET_HAS_PRECISE_SMC
1064
    if (tb && pc != 0) {
1065
        current_tb = tb_find_pc(pc);
1066
    }
1067
#endif
1068
    while (tb != NULL) {
1069
        n = (long)tb & 3;
1070
        tb = (TranslationBlock *)((long)tb & ~3);
1071
#ifdef TARGET_HAS_PRECISE_SMC
1072
        if (current_tb == tb &&
1073
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1074
                /* If we are modifying the current TB, we must stop
1075
                   its execution. We could be more precise by checking
1076
                   that the modification is after the current PC, but it
1077
                   would require a specialized function to partially
1078
                   restore the CPU state */
1079

    
1080
            current_tb_modified = 1;
1081
            cpu_restore_state(current_tb, env, pc, puc);
1082
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1083
                                 &current_flags);
1084
        }
1085
#endif /* TARGET_HAS_PRECISE_SMC */
1086
        tb_phys_invalidate(tb, addr);
1087
        tb = tb->page_next[n];
1088
    }
1089
    p->first_tb = NULL;
1090
#ifdef TARGET_HAS_PRECISE_SMC
1091
    if (current_tb_modified) {
1092
        /* we generate a block containing just the instruction
1093
           modifying the memory. It will ensure that it cannot modify
1094
           itself */
1095
        env->current_tb = NULL;
1096
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1097
        cpu_resume_from_signal(env, puc);
1098
    }
1099
#endif
1100
}
1101
#endif
1102

    
1103
/* add the tb in the target page and protect it if necessary */
1104
static inline void tb_alloc_page(TranslationBlock *tb,
1105
                                 unsigned int n, target_ulong page_addr)
1106
{
1107
    PageDesc *p;
1108
    TranslationBlock *last_first_tb;
1109

    
1110
    tb->page_addr[n] = page_addr;
1111
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1112
    tb->page_next[n] = p->first_tb;
1113
    last_first_tb = p->first_tb;
1114
    p->first_tb = (TranslationBlock *)((long)tb | n);
1115
    invalidate_page_bitmap(p);
1116

    
1117
#if defined(TARGET_HAS_SMC) || 1
1118

    
1119
#if defined(CONFIG_USER_ONLY)
1120
    if (p->flags & PAGE_WRITE) {
1121
        target_ulong addr;
1122
        PageDesc *p2;
1123
        int prot;
1124

    
1125
        /* force the host page to be non-writable (writes will have a
1126
           page fault + mprotect overhead) */
1127
        page_addr &= qemu_host_page_mask;
1128
        prot = 0;
1129
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1130
            addr += TARGET_PAGE_SIZE) {
1131

    
1132
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1133
            if (!p2)
1134
                continue;
1135
            prot |= p2->flags;
1136
            p2->flags &= ~PAGE_WRITE;
1137
            page_get_flags(addr);
1138
          }
1139
        mprotect(g2h(page_addr), qemu_host_page_size,
1140
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1141
#ifdef DEBUG_TB_INVALIDATE
1142
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1143
               page_addr);
1144
#endif
1145
    }
1146
#else
1147
    /* if some code is already present, then the pages are already
1148
       protected. So we handle the case where only the first TB is
1149
       allocated in a physical page */
1150
    if (!last_first_tb) {
1151
        tlb_protect_code(page_addr);
1152
    }
1153
#endif
1154

    
1155
#endif /* TARGET_HAS_SMC */
1156
}
1157

    
1158
/* Allocate a new translation block. Flush the translation buffer if
1159
   too many translation blocks or too much generated code. */
1160
TranslationBlock *tb_alloc(target_ulong pc)
1161
{
1162
    TranslationBlock *tb;
1163

    
1164
    if (nb_tbs >= code_gen_max_blocks ||
1165
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1166
        return NULL;
1167
    tb = &tbs[nb_tbs++];
1168
    tb->pc = pc;
1169
    tb->cflags = 0;
1170
    return tb;
1171
}
1172

    
1173
void tb_free(TranslationBlock *tb)
1174
{
1175
    /* In practice this is mostly used for single use temporary TB
1176
       Ignore the hard cases and just back up if this TB happens to
1177
       be the last one generated.  */
1178
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1179
        code_gen_ptr = tb->tc_ptr;
1180
        nb_tbs--;
1181
    }
1182
}
1183

    
1184
/* add a new TB and link it to the physical page tables. phys_page2 is
1185
   (-1) to indicate that only one page contains the TB. */
1186
void tb_link_phys(TranslationBlock *tb,
1187
                  target_ulong phys_pc, target_ulong phys_page2)
1188
{
1189
    unsigned int h;
1190
    TranslationBlock **ptb;
1191

    
1192
    /* Grab the mmap lock to stop another thread invalidating this TB
1193
       before we are done.  */
1194
    mmap_lock();
1195
    /* add in the physical hash table */
1196
    h = tb_phys_hash_func(phys_pc);
1197
    ptb = &tb_phys_hash[h];
1198
    tb->phys_hash_next = *ptb;
1199
    *ptb = tb;
1200

    
1201
    /* add in the page list */
1202
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1203
    if (phys_page2 != -1)
1204
        tb_alloc_page(tb, 1, phys_page2);
1205
    else
1206
        tb->page_addr[1] = -1;
1207

    
1208
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1209
    tb->jmp_next[0] = NULL;
1210
    tb->jmp_next[1] = NULL;
1211

    
1212
    /* init original jump addresses */
1213
    if (tb->tb_next_offset[0] != 0xffff)
1214
        tb_reset_jump(tb, 0);
1215
    if (tb->tb_next_offset[1] != 0xffff)
1216
        tb_reset_jump(tb, 1);
1217

    
1218
#ifdef DEBUG_TB_CHECK
1219
    tb_page_check();
1220
#endif
1221
    mmap_unlock();
1222
}
1223

    
1224
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1225
   tb[1].tc_ptr. Return NULL if not found */
1226
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1227
{
1228
    int m_min, m_max, m;
1229
    unsigned long v;
1230
    TranslationBlock *tb;
1231

    
1232
    if (nb_tbs <= 0)
1233
        return NULL;
1234
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1235
        tc_ptr >= (unsigned long)code_gen_ptr)
1236
        return NULL;
1237
    /* binary search (cf Knuth) */
1238
    m_min = 0;
1239
    m_max = nb_tbs - 1;
1240
    while (m_min <= m_max) {
1241
        m = (m_min + m_max) >> 1;
1242
        tb = &tbs[m];
1243
        v = (unsigned long)tb->tc_ptr;
1244
        if (v == tc_ptr)
1245
            return tb;
1246
        else if (tc_ptr < v) {
1247
            m_max = m - 1;
1248
        } else {
1249
            m_min = m + 1;
1250
        }
1251
    }
1252
    return &tbs[m_max];
1253
}
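/* tb_find_pc() relies on TBs being laid out in generation order: tc_ptr
   grows monotonically with the index into tbs[], so a binary search over
   the used entries yields the block whose translated code contains the
   given host PC (the entry with the greatest tc_ptr not above it). */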
1254

    
1255
static void tb_reset_jump_recursive(TranslationBlock *tb);
1256

    
1257
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1258
{
1259
    TranslationBlock *tb1, *tb_next, **ptb;
1260
    unsigned int n1;
1261

    
1262
    tb1 = tb->jmp_next[n];
1263
    if (tb1 != NULL) {
1264
        /* find head of list */
1265
        for(;;) {
1266
            n1 = (long)tb1 & 3;
1267
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268
            if (n1 == 2)
1269
                break;
1270
            tb1 = tb1->jmp_next[n1];
1271
        }
1272
        /* we are now sure that tb jumps to tb1 */
1273
        tb_next = tb1;
1274

    
1275
        /* remove tb from the jmp_first list */
1276
        ptb = &tb_next->jmp_first;
1277
        for(;;) {
1278
            tb1 = *ptb;
1279
            n1 = (long)tb1 & 3;
1280
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281
            if (n1 == n && tb1 == tb)
1282
                break;
1283
            ptb = &tb1->jmp_next[n1];
1284
        }
1285
        *ptb = tb->jmp_next[n];
1286
        tb->jmp_next[n] = NULL;
1287

    
1288
        /* suppress the jump to next tb in generated code */
1289
        tb_reset_jump(tb, n);
1290

    
1291
        /* suppress jumps in the tb we could have jumped to */
1292
        tb_reset_jump_recursive(tb_next);
1293
    }
1294
}
1295

    
1296
static void tb_reset_jump_recursive(TranslationBlock *tb)
1297
{
1298
    tb_reset_jump_recursive2(tb, 0);
1299
    tb_reset_jump_recursive2(tb, 1);
1300
}
1301

    
1302
#if defined(TARGET_HAS_ICE)
1303
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1304
{
1305
    target_phys_addr_t addr;
1306
    target_ulong pd;
1307
    ram_addr_t ram_addr;
1308
    PhysPageDesc *p;
1309

    
1310
    addr = cpu_get_phys_page_debug(env, pc);
1311
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1312
    if (!p) {
1313
        pd = IO_MEM_UNASSIGNED;
1314
    } else {
1315
        pd = p->phys_offset;
1316
    }
1317
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1318
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1319
}
1320
#endif
1321

    
1322
/* Add a watchpoint.  */
1323
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1324
                          int flags, CPUWatchpoint **watchpoint)
1325
{
1326
    target_ulong len_mask = ~(len - 1);
1327
    CPUWatchpoint *wp;
1328

    
1329
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1330
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1331
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1332
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1333
        return -EINVAL;
1334
    }
1335
    wp = qemu_malloc(sizeof(*wp));
1336

    
1337
    wp->vaddr = addr;
1338
    wp->len_mask = len_mask;
1339
    wp->flags = flags;
1340

    
1341
    /* keep all GDB-injected watchpoints in front */
1342
    if (flags & BP_GDB)
1343
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1344
    else
1345
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1346

    
1347
    tlb_flush_page(env, addr);
1348

    
1349
    if (watchpoint)
1350
        *watchpoint = wp;
1351
    return 0;
1352
}
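/* Watchpoints are restricted to naturally aligned power-of-two lengths so
   a single mask can match them: e.g. len = 4 gives len_mask = ~3, and an
   address such as 0x1002 is rejected because addr & ~len_mask
   (0x1002 & 3) is non-zero. */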
1353

    
1354
/* Remove a specific watchpoint.  */
1355
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1356
                          int flags)
1357
{
1358
    target_ulong len_mask = ~(len - 1);
1359
    CPUWatchpoint *wp;
1360

    
1361
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1362
        if (addr == wp->vaddr && len_mask == wp->len_mask
1363
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1364
            cpu_watchpoint_remove_by_ref(env, wp);
1365
            return 0;
1366
        }
1367
    }
1368
    return -ENOENT;
1369
}
1370

    
1371
/* Remove a specific watchpoint by reference.  */
1372
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1373
{
1374
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1375

    
1376
    tlb_flush_page(env, watchpoint->vaddr);
1377

    
1378
    qemu_free(watchpoint);
1379
}
1380

    
1381
/* Remove all matching watchpoints.  */
1382
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1383
{
1384
    CPUWatchpoint *wp, *next;
1385

    
1386
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1387
        if (wp->flags & mask)
1388
            cpu_watchpoint_remove_by_ref(env, wp);
1389
    }
1390
}
1391

    
1392
/* Add a breakpoint.  */
1393
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1394
                          CPUBreakpoint **breakpoint)
1395
{
1396
#if defined(TARGET_HAS_ICE)
1397
    CPUBreakpoint *bp;
1398

    
1399
    bp = qemu_malloc(sizeof(*bp));
1400

    
1401
    bp->pc = pc;
1402
    bp->flags = flags;
1403

    
1404
    /* keep all GDB-injected breakpoints in front */
1405
    if (flags & BP_GDB)
1406
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1407
    else
1408
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1409

    
1410
    breakpoint_invalidate(env, pc);
1411

    
1412
    if (breakpoint)
1413
        *breakpoint = bp;
1414
    return 0;
1415
#else
1416
    return -ENOSYS;
1417
#endif
1418
}
1419

    
1420
/* Remove a specific breakpoint.  */
1421
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1422
{
1423
#if defined(TARGET_HAS_ICE)
1424
    CPUBreakpoint *bp;
1425

    
1426
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1427
        if (bp->pc == pc && bp->flags == flags) {
1428
            cpu_breakpoint_remove_by_ref(env, bp);
1429
            return 0;
1430
        }
1431
    }
1432
    return -ENOENT;
1433
#else
1434
    return -ENOSYS;
1435
#endif
1436
}
1437

    
1438
/* Remove a specific breakpoint by reference.  */
1439
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1440
{
1441
#if defined(TARGET_HAS_ICE)
1442
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1443

    
1444
    breakpoint_invalidate(env, breakpoint->pc);
1445

    
1446
    qemu_free(breakpoint);
1447
#endif
1448
}
1449

    
1450
/* Remove all matching breakpoints. */
1451
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1452
{
1453
#if defined(TARGET_HAS_ICE)
1454
    CPUBreakpoint *bp, *next;
1455

    
1456
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1457
        if (bp->flags & mask)
1458
            cpu_breakpoint_remove_by_ref(env, bp);
1459
    }
1460
#endif
1461
}
1462

    
1463
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1464
   CPU loop after each instruction */
1465
void cpu_single_step(CPUState *env, int enabled)
1466
{
1467
#if defined(TARGET_HAS_ICE)
1468
    if (env->singlestep_enabled != enabled) {
1469
        env->singlestep_enabled = enabled;
1470
        if (kvm_enabled())
1471
            kvm_update_guest_debug(env, 0);
1472
        else {
1473
            /* must flush all the translated code to avoid inconsistencies */
1474
            /* XXX: only flush what is necessary */
1475
            tb_flush(env);
1476
        }
1477
    }
1478
#endif
1479
}
1480

    
1481
/* enable or disable low-level logging */
1482
void cpu_set_log(int log_flags)
1483
{
1484
    loglevel = log_flags;
1485
    if (loglevel && !logfile) {
1486
        logfile = fopen(logfilename, log_append ? "a" : "w");
1487
        if (!logfile) {
1488
            perror(logfilename);
1489
            _exit(1);
1490
        }
1491
#if !defined(CONFIG_SOFTMMU)
1492
        /* must avoid glibc's use of mmap() by setting the stdio buffer "by hand" */
1493
        {
1494
            static char logfile_buf[4096];
1495
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1496
        }
1497
#else
1498
        setvbuf(logfile, NULL, _IOLBF, 0);
1499
#endif
1500
        log_append = 1;
1501
    }
1502
    if (!loglevel && logfile) {
1503
        fclose(logfile);
1504
        logfile = NULL;
1505
    }
1506
}
1507

    
1508
void cpu_set_log_filename(const char *filename)
1509
{
1510
    logfilename = strdup(filename);
1511
    if (logfile) {
1512
        fclose(logfile);
1513
        logfile = NULL;
1514
    }
1515
    cpu_set_log(loglevel);
1516
}
1517

    
1518
static void cpu_unlink_tb(CPUState *env)
1519
{
1520
#if defined(USE_NPTL)
1521
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1522
       problem and hope the cpu will stop of its own accord.  For userspace
1523
       emulation this often isn't actually as bad as it sounds.  Often
1524
       signals are used primarily to interrupt blocking syscalls.  */
1525
#else
1526
    TranslationBlock *tb;
1527
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1528

    
1529
    tb = env->current_tb;
1530
    /* if the cpu is currently executing code, we must unlink it and
1531
       all the potentially executing TBs */
1532
    if (tb && !testandset(&interrupt_lock)) {
1533
        env->current_tb = NULL;
1534
        tb_reset_jump_recursive(tb);
1535
        resetlock(&interrupt_lock);
1536
    }
1537
#endif
1538
}
1539

    
1540
/* mask must never be zero, except for A20 change call */
1541
void cpu_interrupt(CPUState *env, int mask)
1542
{
1543
    int old_mask;
1544

    
1545
    old_mask = env->interrupt_request;
1546
    env->interrupt_request |= mask;
1547

    
1548
#ifndef CONFIG_USER_ONLY
1549
    /*
1550
     * If called from iothread context, wake the target cpu in
1551
     * case it's halted.
1552
     */
1553
    if (!qemu_cpu_self(env)) {
1554
        qemu_cpu_kick(env);
1555
        return;
1556
    }
1557
#endif
1558

    
1559
    if (use_icount) {
1560
        env->icount_decr.u16.high = 0xffff;
1561
#ifndef CONFIG_USER_ONLY
1562
        if (!can_do_io(env)
1563
            && (mask & ~old_mask) != 0) {
1564
            cpu_abort(env, "Raised interrupt while not in I/O function");
1565
        }
1566
#endif
1567
    } else {
1568
        cpu_unlink_tb(env);
1569
    }
1570
}
1571

    
1572
void cpu_reset_interrupt(CPUState *env, int mask)
1573
{
1574
    env->interrupt_request &= ~mask;
1575
}
1576

    
1577
void cpu_exit(CPUState *env)
1578
{
1579
    env->exit_request = 1;
1580
    cpu_unlink_tb(env);
1581
}
1582

    
1583
const CPULogItem cpu_log_items[] = {
1584
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1585
      "show generated host assembly code for each compiled TB" },
1586
    { CPU_LOG_TB_IN_ASM, "in_asm",
1587
      "show target assembly code for each compiled TB" },
1588
    { CPU_LOG_TB_OP, "op",
1589
      "show micro ops for each compiled TB" },
1590
    { CPU_LOG_TB_OP_OPT, "op_opt",
1591
      "show micro ops "
1592
#ifdef TARGET_I386
1593
      "before eflags optimization and "
1594
#endif
1595
      "after liveness analysis" },
1596
    { CPU_LOG_INT, "int",
1597
      "show interrupts/exceptions in short format" },
1598
    { CPU_LOG_EXEC, "exec",
1599
      "show trace before each executed TB (lots of logs)" },
1600
    { CPU_LOG_TB_CPU, "cpu",
1601
      "show CPU state before block translation" },
1602
#ifdef TARGET_I386
1603
    { CPU_LOG_PCALL, "pcall",
1604
      "show protected mode far calls/returns/exceptions" },
1605
    { CPU_LOG_RESET, "cpu_reset",
1606
      "show CPU state before CPU resets" },
1607
#endif
1608
#ifdef DEBUG_IOPORT
1609
    { CPU_LOG_IOPORT, "ioport",
1610
      "show all i/o ports accesses" },
1611
#endif
1612
    { 0, NULL, NULL },
1613
};
1614

    
1615
static int cmp1(const char *s1, int n, const char *s2)
1616
{
1617
    if (strlen(s2) != n)
1618
        return 0;
1619
    return memcmp(s1, s2, n) == 0;
1620
}
1621

    
1622
/* takes a comma-separated list of log masks. Returns 0 on error. */
1623
int cpu_str_to_log_mask(const char *str)
1624
{
1625
    const CPULogItem *item;
1626
    int mask;
1627
    const char *p, *p1;
1628

    
1629
    p = str;
1630
    mask = 0;
1631
    for(;;) {
1632
        p1 = strchr(p, ',');
1633
        if (!p1)
1634
            p1 = p + strlen(p);
1635
        if (cmp1(p, p1 - p, "all")) {
1636
            for(item = cpu_log_items; item->mask != 0; item++) {
1637
                mask |= item->mask;
1638
            }
1639
        } else {
1640
            for(item = cpu_log_items; item->mask != 0; item++) {
1641
                if (cmp1(p, p1 - p, item->name))
1642
                    goto found;
1643
            }
1644
            return 0;
1645
        }
1646
    found:
1647
        mask |= item->mask;
1648
        if (*p1 != ',')
1649
            break;
1650
        p = p1 + 1;
1651
    }
1652
    return mask;
1653
}
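/* E.g. cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, "all" accumulates every entry of
   cpu_log_items, and an unknown name makes the whole call return 0. */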
1654

    
1655
void cpu_abort(CPUState *env, const char *fmt, ...)
1656
{
1657
    va_list ap;
1658
    va_list ap2;
1659

    
1660
    va_start(ap, fmt);
1661
    va_copy(ap2, ap);
1662
    fprintf(stderr, "qemu: fatal: ");
1663
    vfprintf(stderr, fmt, ap);
1664
    fprintf(stderr, "\n");
1665
#ifdef TARGET_I386
1666
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1667
#else
1668
    cpu_dump_state(env, stderr, fprintf, 0);
1669
#endif
1670
    if (qemu_log_enabled()) {
1671
        qemu_log("qemu: fatal: ");
1672
        qemu_log_vprintf(fmt, ap2);
1673
        qemu_log("\n");
1674
#ifdef TARGET_I386
1675
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1676
#else
1677
        log_cpu_state(env, 0);
1678
#endif
1679
        qemu_log_flush();
1680
        qemu_log_close();
1681
    }
1682
    va_end(ap2);
1683
    va_end(ap);
1684
    abort();
1685
}
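/* Illustrative usage sketch: callers report unrecoverable emulation errors
   with a printf-style message, e.g. (opcode is a placeholder variable):

       cpu_abort(env, "unsupported opcode 0x%02x", opcode);

   The CPU state is dumped to stderr and to the log before abort(). */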
1686

    
1687
CPUState *cpu_copy(CPUState *env)
1688
{
1689
    CPUState *new_env = cpu_init(env->cpu_model_str);
1690
    CPUState *next_cpu = new_env->next_cpu;
1691
    int cpu_index = new_env->cpu_index;
1692
#if defined(TARGET_HAS_ICE)
1693
    CPUBreakpoint *bp;
1694
    CPUWatchpoint *wp;
1695
#endif
1696

    
1697
    memcpy(new_env, env, sizeof(CPUState));
1698

    
1699
    /* Preserve chaining and index. */
1700
    new_env->next_cpu = next_cpu;
1701
    new_env->cpu_index = cpu_index;
1702

    
1703
    /* Clone all break/watchpoints.
1704
       Note: Once we support ptrace with hw-debug register access, make sure
1705
       BP_CPU break/watchpoints are handled correctly on clone. */
1706
    TAILQ_INIT(&new_env->breakpoints);
1707
    TAILQ_INIT(&new_env->watchpoints);
1708
#if defined(TARGET_HAS_ICE)
1709
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1710
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1711
    }
1712
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1713
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1714
                              wp->flags, NULL);
1715
    }
1716
#endif
1717

    
1718
    return new_env;
1719
}
1720

    
1721
#if !defined(CONFIG_USER_ONLY)
1722

    
1723
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1724
{
1725
    unsigned int i;
1726

    
1727
    /* Discard jump cache entries for any tb which might potentially
1728
       overlap the flushed page.  */
1729
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1730
    memset (&env->tb_jmp_cache[i], 0, 
1731
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1732

    
1733
    i = tb_jmp_cache_hash_page(addr);
1734
    memset (&env->tb_jmp_cache[i], 0, 
1735
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1736
}
1737

    
1738
/* NOTE: if flush_global is true, also flush global entries (not
1739
   implemented yet) */
1740
void tlb_flush(CPUState *env, int flush_global)
1741
{
1742
    int i;
1743

    
1744
#if defined(DEBUG_TLB)
1745
    printf("tlb_flush:\n");
1746
#endif
1747
    /* must reset current TB so that interrupts cannot modify the
1748
       links while we are modifying them */
1749
    env->current_tb = NULL;
1750

    
1751
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1752
        env->tlb_table[0][i].addr_read = -1;
1753
        env->tlb_table[0][i].addr_write = -1;
1754
        env->tlb_table[0][i].addr_code = -1;
1755
        env->tlb_table[1][i].addr_read = -1;
1756
        env->tlb_table[1][i].addr_write = -1;
1757
        env->tlb_table[1][i].addr_code = -1;
1758
#if (NB_MMU_MODES >= 3)
1759
        env->tlb_table[2][i].addr_read = -1;
1760
        env->tlb_table[2][i].addr_write = -1;
1761
        env->tlb_table[2][i].addr_code = -1;
1762
#endif
1763
#if (NB_MMU_MODES >= 4)
1764
        env->tlb_table[3][i].addr_read = -1;
1765
        env->tlb_table[3][i].addr_write = -1;
1766
        env->tlb_table[3][i].addr_code = -1;
1767
#endif
1768
#if (NB_MMU_MODES >= 5)
1769
        env->tlb_table[4][i].addr_read = -1;
1770
        env->tlb_table[4][i].addr_write = -1;
1771
        env->tlb_table[4][i].addr_code = -1;
1772
#endif
1773

    
1774
    }
1775

    
1776
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1777

    
1778
#ifdef CONFIG_KQEMU
1779
    if (env->kqemu_enabled) {
1780
        kqemu_flush(env, flush_global);
1781
    }
1782
#endif
1783
    tlb_flush_count++;
1784
}
1785

    
1786
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1787
{
1788
    if (addr == (tlb_entry->addr_read &
1789
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1790
        addr == (tlb_entry->addr_write &
1791
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1792
        addr == (tlb_entry->addr_code &
1793
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1794
        tlb_entry->addr_read = -1;
1795
        tlb_entry->addr_write = -1;
1796
        tlb_entry->addr_code = -1;
1797
    }
1798
}
1799

    
1800
void tlb_flush_page(CPUState *env, target_ulong addr)
1801
{
1802
    int i;
1803

    
1804
#if defined(DEBUG_TLB)
1805
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1806
#endif
1807
    /* must reset current TB so that interrupts cannot modify the
1808
       links while we are modifying them */
1809
    env->current_tb = NULL;
1810

    
1811
    addr &= TARGET_PAGE_MASK;
1812
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1813
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1814
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1815
#if (NB_MMU_MODES >= 3)
1816
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1817
#endif
1818
#if (NB_MMU_MODES >= 4)
1819
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1820
#endif
1821
#if (NB_MMU_MODES >= 5)
1822
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1823
#endif
1824

    
1825
    tlb_flush_jmp_cache(env, addr);
1826

    
1827
#ifdef CONFIG_KQEMU
1828
    if (env->kqemu_enabled) {
1829
        kqemu_flush_page(env, addr);
1830
    }
1831
#endif
1832
}
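/* Illustrative usage sketch: target MMU code calls this when a single guest
   mapping becomes stale, e.g. after the guest invalidates one page table
   entry (va is a placeholder):

       tlb_flush_page(env, va);

   A change that cannot be tracked per page (e.g. a full address space
   switch) uses tlb_flush(env, 1) instead. */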
1833

    
1834
/* update the TLBs so that writes to code in the virtual page 'addr'
1835
   can be detected */
1836
static void tlb_protect_code(ram_addr_t ram_addr)
1837
{
1838
    cpu_physical_memory_reset_dirty(ram_addr,
1839
                                    ram_addr + TARGET_PAGE_SIZE,
1840
                                    CODE_DIRTY_FLAG);
1841
}
1842

    
1843
/* update the TLB so that writes in the physical page 'ram_addr' are no longer
1844
   tested for self-modifying code */
1845
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1846
                                    target_ulong vaddr)
1847
{
1848
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1849
}
1850

    
1851
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1852
                                         unsigned long start, unsigned long length)
1853
{
1854
    unsigned long addr;
1855
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1856
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1857
        if ((addr - start) < length) {
1858
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1859
        }
1860
    }
1861
}
1862

    
1863
/* Note: start and end must be within the same ram block.  */
1864
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1865
                                     int dirty_flags)
1866
{
1867
    CPUState *env;
1868
    unsigned long length, start1;
1869
    int i, mask, len;
1870
    uint8_t *p;
1871

    
1872
    start &= TARGET_PAGE_MASK;
1873
    end = TARGET_PAGE_ALIGN(end);
1874

    
1875
    length = end - start;
1876
    if (length == 0)
1877
        return;
1878
    len = length >> TARGET_PAGE_BITS;
1879
#ifdef CONFIG_KQEMU
1880
    /* XXX: should not depend on cpu context */
1881
    env = first_cpu;
1882
    if (env->kqemu_enabled) {
1883
        ram_addr_t addr;
1884
        addr = start;
1885
        for(i = 0; i < len; i++) {
1886
            kqemu_set_notdirty(env, addr);
1887
            addr += TARGET_PAGE_SIZE;
1888
        }
1889
    }
1890
#endif
1891
    mask = ~dirty_flags;
1892
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1893
    for(i = 0; i < len; i++)
1894
        p[i] &= mask;
1895

    
1896
    /* we modify the TLB cache so that the dirty bit will be set again
1897
       when accessing the range */
1898
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1899
    /* Check that we don't span multiple blocks - this breaks the
1900
       address comparisons below.  */
1901
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1902
            != (end - 1) - start) {
1903
        abort();
1904
    }
1905

    
1906
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1907
        for(i = 0; i < CPU_TLB_SIZE; i++)
1908
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1909
        for(i = 0; i < CPU_TLB_SIZE; i++)
1910
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1911
#if (NB_MMU_MODES >= 3)
1912
        for(i = 0; i < CPU_TLB_SIZE; i++)
1913
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1914
#endif
1915
#if (NB_MMU_MODES >= 4)
1916
        for(i = 0; i < CPU_TLB_SIZE; i++)
1917
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1918
#endif
1919
#if (NB_MMU_MODES >= 5)
1920
        for(i = 0; i < CPU_TLB_SIZE; i++)
1921
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1922
#endif
1923
    }
1924
}
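/* Illustrative usage sketch: a display adapter scanning its frame buffer
   checks the dirty bits page by page and clears them for the next refresh
   (fb_addr and fb_size are placeholders):

       if (cpu_physical_memory_get_dirty(fb_addr, VGA_DIRTY_FLAG)) {
           ... redraw the scanlines backed by this page ...
       }
       cpu_physical_memory_reset_dirty(fb_addr, fb_addr + fb_size,
                                       VGA_DIRTY_FLAG);
*/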
1925

    
1926
int cpu_physical_memory_set_dirty_tracking(int enable)
1927
{
1928
    in_migration = enable;
1929
    if (kvm_enabled()) {
1930
        return kvm_set_migration_log(enable);
1931
    }
1932
    return 0;
1933
}
1934

    
1935
int cpu_physical_memory_get_dirty_tracking(void)
1936
{
1937
    return in_migration;
1938
}
1939

    
1940
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1941
                                   target_phys_addr_t end_addr)
1942
{
1943
    int ret = 0;
1944

    
1945
    if (kvm_enabled())
1946
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1947
    return ret;
1948
}
1949

    
1950
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1951
{
1952
    ram_addr_t ram_addr;
1953
    void *p;
1954

    
1955
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1956
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1957
            + tlb_entry->addend);
1958
        ram_addr = qemu_ram_addr_from_host(p);
1959
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1960
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1961
        }
1962
    }
1963
}
1964

    
1965
/* update the TLB according to the current state of the dirty bits */
1966
void cpu_tlb_update_dirty(CPUState *env)
1967
{
1968
    int i;
1969
    for(i = 0; i < CPU_TLB_SIZE; i++)
1970
        tlb_update_dirty(&env->tlb_table[0][i]);
1971
    for(i = 0; i < CPU_TLB_SIZE; i++)
1972
        tlb_update_dirty(&env->tlb_table[1][i]);
1973
#if (NB_MMU_MODES >= 3)
1974
    for(i = 0; i < CPU_TLB_SIZE; i++)
1975
        tlb_update_dirty(&env->tlb_table[2][i]);
1976
#endif
1977
#if (NB_MMU_MODES >= 4)
1978
    for(i = 0; i < CPU_TLB_SIZE; i++)
1979
        tlb_update_dirty(&env->tlb_table[3][i]);
1980
#endif
1981
#if (NB_MMU_MODES >= 5)
1982
    for(i = 0; i < CPU_TLB_SIZE; i++)
1983
        tlb_update_dirty(&env->tlb_table[4][i]);
1984
#endif
1985
}
1986

    
1987
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1988
{
1989
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1990
        tlb_entry->addr_write = vaddr;
1991
}
1992

    
1993
/* update the TLB corresponding to virtual page vaddr
1994
   so that it is no longer dirty */
1995
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1996
{
1997
    int i;
1998

    
1999
    vaddr &= TARGET_PAGE_MASK;
2000
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2001
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
2002
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
2003
#if (NB_MMU_MODES >= 3)
2004
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
2005
#endif
2006
#if (NB_MMU_MODES >= 4)
2007
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
2008
#endif
2009
#if (NB_MMU_MODES >= 5)
2010
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
2011
#endif
2012
}
2013

    
2014
/* add a new TLB entry. At most one entry for a given virtual address
2015
   is permitted. Return 0 if OK or 2 if the page could not be mapped
2016
   (can only happen in non-SOFTMMU mode for I/O pages or pages
2017
   conflicting with the host address space). */
2018
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2019
                      target_phys_addr_t paddr, int prot,
2020
                      int mmu_idx, int is_softmmu)
2021
{
2022
    PhysPageDesc *p;
2023
    unsigned long pd;
2024
    unsigned int index;
2025
    target_ulong address;
2026
    target_ulong code_address;
2027
    target_phys_addr_t addend;
2028
    int ret;
2029
    CPUTLBEntry *te;
2030
    CPUWatchpoint *wp;
2031
    target_phys_addr_t iotlb;
2032

    
2033
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2034
    if (!p) {
2035
        pd = IO_MEM_UNASSIGNED;
2036
    } else {
2037
        pd = p->phys_offset;
2038
    }
2039
#if defined(DEBUG_TLB)
2040
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2041
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2042
#endif
2043

    
2044
    ret = 0;
2045
    address = vaddr;
2046
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2047
        /* IO memory case (romd handled later) */
2048
        address |= TLB_MMIO;
2049
    }
2050
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2051
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2052
        /* Normal RAM.  */
2053
        iotlb = pd & TARGET_PAGE_MASK;
2054
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2055
            iotlb |= IO_MEM_NOTDIRTY;
2056
        else
2057
            iotlb |= IO_MEM_ROM;
2058
    } else {
2059
        /* IO handlers are currently passed a physical address.
2060
           It would be nice to pass an offset from the base address
2061
           of that region.  This would avoid having to special case RAM,
2062
           and avoid full address decoding in every device.
2063
           We can't use the high bits of pd for this because
2064
           IO_MEM_ROMD uses these as a ram address.  */
2065
        iotlb = (pd & ~TARGET_PAGE_MASK);
2066
        if (p) {
2067
            iotlb += p->region_offset;
2068
        } else {
2069
            iotlb += paddr;
2070
        }
2071
    }
2072

    
2073
    code_address = address;
2074
    /* Make accesses to pages with watchpoints go via the
2075
       watchpoint trap routines.  */
2076
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2077
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2078
            iotlb = io_mem_watch + paddr;
2079
            /* TODO: The memory case can be optimized by not trapping
2080
               reads of pages with a write breakpoint.  */
2081
            address |= TLB_MMIO;
2082
        }
2083
    }
2084

    
2085
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2086
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2087
    te = &env->tlb_table[mmu_idx][index];
2088
    te->addend = addend - vaddr;
2089
    if (prot & PAGE_READ) {
2090
        te->addr_read = address;
2091
    } else {
2092
        te->addr_read = -1;
2093
    }
2094

    
2095
    if (prot & PAGE_EXEC) {
2096
        te->addr_code = code_address;
2097
    } else {
2098
        te->addr_code = -1;
2099
    }
2100
    if (prot & PAGE_WRITE) {
2101
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2102
            (pd & IO_MEM_ROMD)) {
2103
            /* Write access calls the I/O callback.  */
2104
            te->addr_write = address | TLB_MMIO;
2105
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2106
                   !cpu_physical_memory_is_dirty(pd)) {
2107
            te->addr_write = address | TLB_NOTDIRTY;
2108
        } else {
2109
            te->addr_write = address;
2110
        }
2111
    } else {
2112
        te->addr_write = -1;
2113
    }
2114
    return ret;
2115
}
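/* Note on the addend computed above: for ordinary RAM pages the softmmu
   fast path turns a guest virtual address into a host pointer with a single
   addition, conceptually

       host_ptr = (uint8_t *)(unsigned long)(guest_va + te->addend);

   because addend was set to the host pointer of the page minus the page's
   guest virtual base.  Entries tagged TLB_MMIO or TLB_NOTDIRTY are made to
   miss that fast path so the access is routed through the I/O handlers
   (sketch only; the real fast path lives in the softmmu access templates). */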
2116

    
2117
#else
2118

    
2119
void tlb_flush(CPUState *env, int flush_global)
2120
{
2121
}
2122

    
2123
void tlb_flush_page(CPUState *env, target_ulong addr)
2124
{
2125
}
2126

    
2127
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2128
                      target_phys_addr_t paddr, int prot,
2129
                      int mmu_idx, int is_softmmu)
2130
{
2131
    return 0;
2132
}
2133

    
2134
/* dump memory mappings */
2135
void page_dump(FILE *f)
2136
{
2137
    unsigned long start, end;
2138
    int i, j, prot, prot1;
2139
    PageDesc *p;
2140

    
2141
    fprintf(f, "%-8s %-8s %-8s %s\n",
2142
            "start", "end", "size", "prot");
2143
    start = -1;
2144
    end = -1;
2145
    prot = 0;
2146
    for(i = 0; i <= L1_SIZE; i++) {
2147
        if (i < L1_SIZE)
2148
            p = l1_map[i];
2149
        else
2150
            p = NULL;
2151
        for(j = 0;j < L2_SIZE; j++) {
2152
            if (!p)
2153
                prot1 = 0;
2154
            else
2155
                prot1 = p[j].flags;
2156
            if (prot1 != prot) {
2157
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2158
                if (start != -1) {
2159
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2160
                            start, end, end - start,
2161
                            prot & PAGE_READ ? 'r' : '-',
2162
                            prot & PAGE_WRITE ? 'w' : '-',
2163
                            prot & PAGE_EXEC ? 'x' : '-');
2164
                }
2165
                if (prot1 != 0)
2166
                    start = end;
2167
                else
2168
                    start = -1;
2169
                prot = prot1;
2170
            }
2171
            if (!p)
2172
                break;
2173
        }
2174
    }
2175
}
2176

    
2177
int page_get_flags(target_ulong address)
2178
{
2179
    PageDesc *p;
2180

    
2181
    p = page_find(address >> TARGET_PAGE_BITS);
2182
    if (!p)
2183
        return 0;
2184
    return p->flags;
2185
}
2186

    
2187
/* modify the flags of a page and invalidate the code if
2188
   necessary. The flag PAGE_WRITE_ORG is set automatically
2189
   when PAGE_WRITE is set. */
2190
void page_set_flags(target_ulong start, target_ulong end, int flags)
2191
{
2192
    PageDesc *p;
2193
    target_ulong addr;
2194

    
2195
    /* mmap_lock should already be held.  */
2196
    start = start & TARGET_PAGE_MASK;
2197
    end = TARGET_PAGE_ALIGN(end);
2198
    if (flags & PAGE_WRITE)
2199
        flags |= PAGE_WRITE_ORG;
2200
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2201
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2202
        /* We may be called for host regions that are outside guest
2203
           address space.  */
2204
        if (!p)
2205
            return;
2206
        /* if the write protection is set, then we invalidate the code
2207
           inside */
2208
        if (!(p->flags & PAGE_WRITE) &&
2209
            (flags & PAGE_WRITE) &&
2210
            p->first_tb) {
2211
            tb_invalidate_phys_page(addr, 0, NULL);
2212
        }
2213
        p->flags = flags;
2214
    }
2215
}
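/* Illustrative usage sketch: the user mode mmap/mprotect emulation marks a
   guest range after changing the host mapping with something like (start,
   len and prot are placeholders):

       page_set_flags(start, start + len, prot | PAGE_VALID);

   Because setting PAGE_WRITE here also sets PAGE_WRITE_ORG, page_unprotect()
   below can later tell "really writable but temporarily write-protected for
   translated code" apart from "never writable". */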
2216

    
2217
int page_check_range(target_ulong start, target_ulong len, int flags)
2218
{
2219
    PageDesc *p;
2220
    target_ulong end;
2221
    target_ulong addr;
2222

    
2223
    if (start + len < start)
2224
        /* we've wrapped around */
2225
        return -1;
2226

    
2227
    end = TARGET_PAGE_ALIGN(start + len); /* must do this before we lose bits in the next step */
2228
    start = start & TARGET_PAGE_MASK;
2229

    
2230
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2231
        p = page_find(addr >> TARGET_PAGE_BITS);
2232
        if( !p )
2233
            return -1;
2234
        if( !(p->flags & PAGE_VALID) )
2235
            return -1;
2236

    
2237
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2238
            return -1;
2239
        if (flags & PAGE_WRITE) {
2240
            if (!(p->flags & PAGE_WRITE_ORG))
2241
                return -1;
2242
            /* unprotect the page if it was put read-only because it
2243
               contains translated code */
2244
            if (!(p->flags & PAGE_WRITE)) {
2245
                if (!page_unprotect(addr, 0, NULL))
2246
                    return -1;
2247
            }
2248
            return 0;
2249
        }
2250
    }
2251
    return 0;
2252
}
2253

    
2254
/* called from signal handler: invalidate the code and unprotect the
2255
   page. Return TRUE if the fault was successfully handled. */
2256
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2257
{
2258
    unsigned int page_index, prot, pindex;
2259
    PageDesc *p, *p1;
2260
    target_ulong host_start, host_end, addr;
2261

    
2262
    /* Technically this isn't safe inside a signal handler.  However we
2263
       know this only ever happens in a synchronous SEGV handler, so in
2264
       practice it seems to be ok.  */
2265
    mmap_lock();
2266

    
2267
    host_start = address & qemu_host_page_mask;
2268
    page_index = host_start >> TARGET_PAGE_BITS;
2269
    p1 = page_find(page_index);
2270
    if (!p1) {
2271
        mmap_unlock();
2272
        return 0;
2273
    }
2274
    host_end = host_start + qemu_host_page_size;
2275
    p = p1;
2276
    prot = 0;
2277
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2278
        prot |= p->flags;
2279
        p++;
2280
    }
2281
    /* if the page was really writable, then we change its
2282
       protection back to writable */
2283
    if (prot & PAGE_WRITE_ORG) {
2284
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2285
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2286
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2287
                     (prot & PAGE_BITS) | PAGE_WRITE);
2288
            p1[pindex].flags |= PAGE_WRITE;
2289
            /* and since the content will be modified, we must invalidate
2290
               the corresponding translated code. */
2291
            tb_invalidate_phys_page(address, pc, puc);
2292
#ifdef DEBUG_TB_CHECK
2293
            tb_invalidate_check(address);
2294
#endif
2295
            mmap_unlock();
2296
            return 1;
2297
        }
2298
    }
2299
    mmap_unlock();
2300
    return 0;
2301
}
2302

    
2303
static inline void tlb_set_dirty(CPUState *env,
2304
                                 unsigned long addr, target_ulong vaddr)
2305
{
2306
}
2307
#endif /* defined(CONFIG_USER_ONLY) */
2308

    
2309
#if !defined(CONFIG_USER_ONLY)
2310

    
2311
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2312
                             ram_addr_t memory, ram_addr_t region_offset);
2313
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2314
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2315
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2316
                      need_subpage)                                     \
2317
    do {                                                                \
2318
        if (addr > start_addr)                                          \
2319
            start_addr2 = 0;                                            \
2320
        else {                                                          \
2321
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2322
            if (start_addr2 > 0)                                        \
2323
                need_subpage = 1;                                       \
2324
        }                                                               \
2325
                                                                        \
2326
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2327
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2328
        else {                                                          \
2329
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2330
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2331
                need_subpage = 1;                                       \
2332
        }                                                               \
2333
    } while (0)
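/* Worked example for CHECK_SUBPAGE (assuming 4 KB target pages): registering
   orig_size = 0x20 bytes at start_addr = 0x10000040 gives, on the first
   (and only) loop iteration, start_addr2 = 0x40 and end_addr2 = 0x5f, and
   need_subpage is set because the range neither starts at the page boundary
   nor reaches the end of the page; such a page is then handled through
   subpage_init() and subpage_register() below instead of as a whole page. */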
2334

    
2335
/* register physical memory. 'size' must be a multiple of the target
2336
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2337
   io memory page.  The address used when calling the IO function is
2338
   the offset from the start of the region, plus region_offset.  Both
2339
   start_addr and region_offset are rounded down to a page boundary
2340
   before calculating this offset.  This should not be a problem unless
2341
   the low bits of start_addr and region_offset differ.  */
2342
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2343
                                         ram_addr_t size,
2344
                                         ram_addr_t phys_offset,
2345
                                         ram_addr_t region_offset)
2346
{
2347
    target_phys_addr_t addr, end_addr;
2348
    PhysPageDesc *p;
2349
    CPUState *env;
2350
    ram_addr_t orig_size = size;
2351
    void *subpage;
2352

    
2353
#ifdef CONFIG_KQEMU
2354
    /* XXX: should not depend on cpu context */
2355
    env = first_cpu;
2356
    if (env->kqemu_enabled) {
2357
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2358
    }
2359
#endif
2360
    if (kvm_enabled())
2361
        kvm_set_phys_mem(start_addr, size, phys_offset);
2362

    
2363
    if (phys_offset == IO_MEM_UNASSIGNED) {
2364
        region_offset = start_addr;
2365
    }
2366
    region_offset &= TARGET_PAGE_MASK;
2367
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2368
    end_addr = start_addr + (target_phys_addr_t)size;
2369
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2370
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2371
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2372
            ram_addr_t orig_memory = p->phys_offset;
2373
            target_phys_addr_t start_addr2, end_addr2;
2374
            int need_subpage = 0;
2375

    
2376
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2377
                          need_subpage);
2378
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2379
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2380
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2381
                                           &p->phys_offset, orig_memory,
2382
                                           p->region_offset);
2383
                } else {
2384
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2385
                                            >> IO_MEM_SHIFT];
2386
                }
2387
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2388
                                 region_offset);
2389
                p->region_offset = 0;
2390
            } else {
2391
                p->phys_offset = phys_offset;
2392
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2393
                    (phys_offset & IO_MEM_ROMD))
2394
                    phys_offset += TARGET_PAGE_SIZE;
2395
            }
2396
        } else {
2397
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2398
            p->phys_offset = phys_offset;
2399
            p->region_offset = region_offset;
2400
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2401
                (phys_offset & IO_MEM_ROMD)) {
2402
                phys_offset += TARGET_PAGE_SIZE;
2403
            } else {
2404
                target_phys_addr_t start_addr2, end_addr2;
2405
                int need_subpage = 0;
2406

    
2407
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2408
                              end_addr2, need_subpage);
2409

    
2410
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2411
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2412
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2413
                                           addr & TARGET_PAGE_MASK);
2414
                    subpage_register(subpage, start_addr2, end_addr2,
2415
                                     phys_offset, region_offset);
2416
                    p->region_offset = 0;
2417
                }
2418
            }
2419
        }
2420
        region_offset += TARGET_PAGE_SIZE;
2421
    }
2422

    
2423
    /* since each CPU stores ram addresses in its TLB cache, we must
2424
       reset the modified entries */
2425
    /* XXX: slow ! */
2426
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2427
        tlb_flush(env, 1);
2428
    }
2429
}
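/* Illustrative usage sketch: a board model registers its main RAM roughly as
   (ram_size is a placeholder)

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_off | IO_MEM_RAM);

   where cpu_register_physical_memory() is the common wrapper that calls this
   function with region_offset == 0.  MMIO regions are registered the same
   way, using the value returned by cpu_register_io_memory() as phys_offset. */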
2430

    
2431
/* XXX: temporary until new memory mapping API */
2432
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2433
{
2434
    PhysPageDesc *p;
2435

    
2436
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2437
    if (!p)
2438
        return IO_MEM_UNASSIGNED;
2439
    return p->phys_offset;
2440
}
2441

    
2442
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2443
{
2444
    if (kvm_enabled())
2445
        kvm_coalesce_mmio_region(addr, size);
2446
}
2447

    
2448
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2449
{
2450
    if (kvm_enabled())
2451
        kvm_uncoalesce_mmio_region(addr, size);
2452
}
2453

    
2454
#ifdef CONFIG_KQEMU
2455
/* XXX: better than nothing */
2456
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2457
{
2458
    ram_addr_t addr;
2459
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2460
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2461
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2462
        abort();
2463
    }
2464
    addr = last_ram_offset;
2465
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2466
    return addr;
2467
}
2468
#endif
2469

    
2470
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2471
{
2472
    RAMBlock *new_block;
2473

    
2474
#ifdef CONFIG_KQEMU
2475
    if (kqemu_phys_ram_base) {
2476
        return kqemu_ram_alloc(size);
2477
    }
2478
#endif
2479

    
2480
    size = TARGET_PAGE_ALIGN(size);
2481
    new_block = qemu_malloc(sizeof(*new_block));
2482

    
2483
    new_block->host = qemu_vmalloc(size);
2484
    new_block->offset = last_ram_offset;
2485
    new_block->length = size;
2486

    
2487
    new_block->next = ram_blocks;
2488
    ram_blocks = new_block;
2489

    
2490
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2491
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2492
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2493
           0xff, size >> TARGET_PAGE_BITS);
2494

    
2495
    last_ram_offset += size;
2496

    
2497
    if (kvm_enabled())
2498
        kvm_setup_guest_memory(new_block->host, size);
2499

    
2500
    return new_block->offset;
2501
}
2502

    
2503
void qemu_ram_free(ram_addr_t addr)
2504
{
2505
    /* TODO: implement this.  */
2506
}
2507

    
2508
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2509
   With the exception of the softmmu code in this file, this should
2510
   only be used for local memory (e.g. video ram) that the device owns,
2511
   and knows it isn't going to access beyond the end of the block.
2512

2513
   It should not be used for general purpose DMA.
2514
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2515
 */
2516
void *qemu_get_ram_ptr(ram_addr_t addr)
2517
{
2518
    RAMBlock *prev;
2519
    RAMBlock **prevp;
2520
    RAMBlock *block;
2521

    
2522
#ifdef CONFIG_KQEMU
2523
    if (kqemu_phys_ram_base) {
2524
        return kqemu_phys_ram_base + addr;
2525
    }
2526
#endif
2527

    
2528
    prev = NULL;
2529
    prevp = &ram_blocks;
2530
    block = ram_blocks;
2531
    while (block && (block->offset > addr
2532
                     || block->offset + block->length <= addr)) {
2533
        if (prev)
2534
          prevp = &prev->next;
2535
        prev = block;
2536
        block = block->next;
2537
    }
2538
    if (!block) {
2539
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2540
        abort();
2541
    }
2542
    /* Move this entry to the start of the list.  */
2543
    if (prev) {
2544
        prev->next = block->next;
2545
        block->next = *prevp;
2546
        *prevp = block;
2547
    }
2548
    return block->host + (addr - block->offset);
2549
}
2550

    
2551
/* Some of the softmmu routines need to translate from a host pointer
2552
   (typically a TLB entry) back to a ram offset.  */
2553
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2554
{
2555
    RAMBlock *prev;
2556
    RAMBlock **prevp;
2557
    RAMBlock *block;
2558
    uint8_t *host = ptr;
2559

    
2560
#ifdef CONFIG_KQEMU
2561
    if (kqemu_phys_ram_base) {
2562
        return host - kqemu_phys_ram_base;
2563
    }
2564
#endif
2565

    
2566
    prev = NULL;
2567
    prevp = &ram_blocks;
2568
    block = ram_blocks;
2569
    while (block && (block->host > host
2570
                     || block->host + block->length <= host)) {
2571
        if (prev)
2572
          prevp = &prev->next;
2573
        prev = block;
2574
        block = block->next;
2575
    }
2576
    if (!block) {
2577
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2578
        abort();
2579
    }
2580
    return block->offset + (host - block->host);
2581
}
2582

    
2583
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2584
{
2585
#ifdef DEBUG_UNASSIGNED
2586
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2587
#endif
2588
#if defined(TARGET_SPARC)
2589
    do_unassigned_access(addr, 0, 0, 0, 1);
2590
#endif
2591
    return 0;
2592
}
2593

    
2594
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2595
{
2596
#ifdef DEBUG_UNASSIGNED
2597
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2598
#endif
2599
#if defined(TARGET_SPARC)
2600
    do_unassigned_access(addr, 0, 0, 0, 2);
2601
#endif
2602
    return 0;
2603
}
2604

    
2605
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2606
{
2607
#ifdef DEBUG_UNASSIGNED
2608
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2609
#endif
2610
#if defined(TARGET_SPARC)
2611
    do_unassigned_access(addr, 0, 0, 0, 4);
2612
#endif
2613
    return 0;
2614
}
2615

    
2616
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2617
{
2618
#ifdef DEBUG_UNASSIGNED
2619
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2620
#endif
2621
#if defined(TARGET_SPARC)
2622
    do_unassigned_access(addr, 1, 0, 0, 1);
2623
#endif
2624
}
2625

    
2626
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2627
{
2628
#ifdef DEBUG_UNASSIGNED
2629
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2630
#endif
2631
#if defined(TARGET_SPARC)
2632
    do_unassigned_access(addr, 1, 0, 0, 2);
2633
#endif
2634
}
2635

    
2636
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2637
{
2638
#ifdef DEBUG_UNASSIGNED
2639
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2640
#endif
2641
#if defined(TARGET_SPARC)
2642
    do_unassigned_access(addr, 1, 0, 0, 4);
2643
#endif
2644
}
2645

    
2646
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2647
    unassigned_mem_readb,
2648
    unassigned_mem_readw,
2649
    unassigned_mem_readl,
2650
};
2651

    
2652
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2653
    unassigned_mem_writeb,
2654
    unassigned_mem_writew,
2655
    unassigned_mem_writel,
2656
};
2657

    
2658
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2659
                                uint32_t val)
2660
{
2661
    int dirty_flags;
2662
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2663
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2664
#if !defined(CONFIG_USER_ONLY)
2665
        tb_invalidate_phys_page_fast(ram_addr, 1);
2666
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2667
#endif
2668
    }
2669
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2670
#ifdef CONFIG_KQEMU
2671
    if (cpu_single_env->kqemu_enabled &&
2672
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2673
        kqemu_modify_page(cpu_single_env, ram_addr);
2674
#endif
2675
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2676
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2677
    /* we remove the notdirty callback only if the code has been
2678
       flushed */
2679
    if (dirty_flags == 0xff)
2680
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2681
}
2682

    
2683
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2684
                                uint32_t val)
2685
{
2686
    int dirty_flags;
2687
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2688
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2689
#if !defined(CONFIG_USER_ONLY)
2690
        tb_invalidate_phys_page_fast(ram_addr, 2);
2691
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2692
#endif
2693
    }
2694
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2695
#ifdef CONFIG_KQEMU
2696
    if (cpu_single_env->kqemu_enabled &&
2697
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2698
        kqemu_modify_page(cpu_single_env, ram_addr);
2699
#endif
2700
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2701
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2702
    /* we remove the notdirty callback only if the code has been
2703
       flushed */
2704
    if (dirty_flags == 0xff)
2705
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2706
}
2707

    
2708
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2709
                                uint32_t val)
2710
{
2711
    int dirty_flags;
2712
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2713
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2714
#if !defined(CONFIG_USER_ONLY)
2715
        tb_invalidate_phys_page_fast(ram_addr, 4);
2716
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2717
#endif
2718
    }
2719
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2720
#ifdef CONFIG_KQEMU
2721
    if (cpu_single_env->kqemu_enabled &&
2722
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2723
        kqemu_modify_page(cpu_single_env, ram_addr);
2724
#endif
2725
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2726
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2727
    /* we remove the notdirty callback only if the code has been
2728
       flushed */
2729
    if (dirty_flags == 0xff)
2730
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2731
}
2732

    
2733
static CPUReadMemoryFunc *error_mem_read[3] = {
2734
    NULL, /* never used */
2735
    NULL, /* never used */
2736
    NULL, /* never used */
2737
};
2738

    
2739
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2740
    notdirty_mem_writeb,
2741
    notdirty_mem_writew,
2742
    notdirty_mem_writel,
2743
};
2744

    
2745
/* Generate a debug exception if a watchpoint has been hit.  */
2746
static void check_watchpoint(int offset, int len_mask, int flags)
2747
{
2748
    CPUState *env = cpu_single_env;
2749
    target_ulong pc, cs_base;
2750
    TranslationBlock *tb;
2751
    target_ulong vaddr;
2752
    CPUWatchpoint *wp;
2753
    int cpu_flags;
2754

    
2755
    if (env->watchpoint_hit) {
2756
        /* We re-entered the check after replacing the TB. Now raise
2757
         * the debug interrupt so that it will trigger after the
2758
         * current instruction. */
2759
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2760
        return;
2761
    }
2762
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2763
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2764
        if ((vaddr == (wp->vaddr & len_mask) ||
2765
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2766
            wp->flags |= BP_WATCHPOINT_HIT;
2767
            if (!env->watchpoint_hit) {
2768
                env->watchpoint_hit = wp;
2769
                tb = tb_find_pc(env->mem_io_pc);
2770
                if (!tb) {
2771
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2772
                              "pc=%p", (void *)env->mem_io_pc);
2773
                }
2774
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2775
                tb_phys_invalidate(tb, -1);
2776
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2777
                    env->exception_index = EXCP_DEBUG;
2778
                } else {
2779
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2780
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2781
                }
2782
                cpu_resume_from_signal(env, NULL);
2783
            }
2784
        } else {
2785
            wp->flags &= ~BP_WATCHPOINT_HIT;
2786
        }
2787
    }
2788
}
2789

    
2790
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2791
   so these check for a hit then pass through to the normal out-of-line
2792
   phys routines.  */
2793
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2794
{
2795
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2796
    return ldub_phys(addr);
2797
}
2798

    
2799
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2800
{
2801
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2802
    return lduw_phys(addr);
2803
}
2804

    
2805
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2806
{
2807
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2808
    return ldl_phys(addr);
2809
}
2810

    
2811
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2812
                             uint32_t val)
2813
{
2814
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2815
    stb_phys(addr, val);
2816
}
2817

    
2818
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2819
                             uint32_t val)
2820
{
2821
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2822
    stw_phys(addr, val);
2823
}
2824

    
2825
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2826
                             uint32_t val)
2827
{
2828
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2829
    stl_phys(addr, val);
2830
}
2831

    
2832
static CPUReadMemoryFunc *watch_mem_read[3] = {
2833
    watch_mem_readb,
2834
    watch_mem_readw,
2835
    watch_mem_readl,
2836
};
2837

    
2838
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2839
    watch_mem_writeb,
2840
    watch_mem_writew,
2841
    watch_mem_writel,
2842
};
2843

    
2844
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2845
                                 unsigned int len)
2846
{
2847
    uint32_t ret;
2848
    unsigned int idx;
2849

    
2850
    idx = SUBPAGE_IDX(addr);
2851
#if defined(DEBUG_SUBPAGE)
2852
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2853
           mmio, len, addr, idx);
2854
#endif
2855
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2856
                                       addr + mmio->region_offset[idx][0][len]);
2857

    
2858
    return ret;
2859
}
2860

    
2861
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2862
                              uint32_t value, unsigned int len)
2863
{
2864
    unsigned int idx;
2865

    
2866
    idx = SUBPAGE_IDX(addr);
2867
#if defined(DEBUG_SUBPAGE)
2868
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2869
           mmio, len, addr, idx, value);
2870
#endif
2871
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2872
                                  addr + mmio->region_offset[idx][1][len],
2873
                                  value);
2874
}
2875

    
2876
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2877
{
2878
#if defined(DEBUG_SUBPAGE)
2879
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2880
#endif
2881

    
2882
    return subpage_readlen(opaque, addr, 0);
2883
}
2884

    
2885
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2886
                            uint32_t value)
2887
{
2888
#if defined(DEBUG_SUBPAGE)
2889
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2890
#endif
2891
    subpage_writelen(opaque, addr, value, 0);
2892
}
2893

    
2894
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2895
{
2896
#if defined(DEBUG_SUBPAGE)
2897
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2898
#endif
2899

    
2900
    return subpage_readlen(opaque, addr, 1);
2901
}
2902

    
2903
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2904
                            uint32_t value)
2905
{
2906
#if defined(DEBUG_SUBPAGE)
2907
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2908
#endif
2909
    subpage_writelen(opaque, addr, value, 1);
2910
}
2911

    
2912
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2913
{
2914
#if defined(DEBUG_SUBPAGE)
2915
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2916
#endif
2917

    
2918
    return subpage_readlen(opaque, addr, 2);
2919
}
2920

    
2921
static void subpage_writel (void *opaque,
2922
                         target_phys_addr_t addr, uint32_t value)
2923
{
2924
#if defined(DEBUG_SUBPAGE)
2925
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2926
#endif
2927
    subpage_writelen(opaque, addr, value, 2);
2928
}
2929

    
2930
static CPUReadMemoryFunc *subpage_read[] = {
2931
    &subpage_readb,
2932
    &subpage_readw,
2933
    &subpage_readl,
2934
};
2935

    
2936
static CPUWriteMemoryFunc *subpage_write[] = {
2937
    &subpage_writeb,
2938
    &subpage_writew,
2939
    &subpage_writel,
2940
};
2941

    
2942
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2943
                             ram_addr_t memory, ram_addr_t region_offset)
2944
{
2945
    int idx, eidx;
2946
    unsigned int i;
2947

    
2948
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2949
        return -1;
2950
    idx = SUBPAGE_IDX(start);
2951
    eidx = SUBPAGE_IDX(end);
2952
#if defined(DEBUG_SUBPAGE)
2953
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2954
           mmio, start, end, idx, eidx, memory);
2955
#endif
2956
    memory >>= IO_MEM_SHIFT;
2957
    for (; idx <= eidx; idx++) {
2958
        for (i = 0; i < 4; i++) {
2959
            if (io_mem_read[memory][i]) {
2960
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2961
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2962
                mmio->region_offset[idx][0][i] = region_offset;
2963
            }
2964
            if (io_mem_write[memory][i]) {
2965
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2966
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2967
                mmio->region_offset[idx][1][i] = region_offset;
2968
            }
2969
        }
2970
    }
2971

    
2972
    return 0;
2973
}
2974

    
2975
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2976
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2977
{
2978
    subpage_t *mmio;
2979
    int subpage_memory;
2980

    
2981
    mmio = qemu_mallocz(sizeof(subpage_t));
2982

    
2983
    mmio->base = base;
2984
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2985
#if defined(DEBUG_SUBPAGE)
2986
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2987
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2988
#endif
2989
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2990
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2991
                         region_offset);
2992

    
2993
    return mmio;
2994
}
2995

    
2996
static int get_free_io_mem_idx(void)
2997
{
2998
    int i;
2999

    
3000
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3001
        if (!io_mem_used[i]) {
3002
            io_mem_used[i] = 1;
3003
            return i;
3004
        }
3005

    
3006
    return -1;
3007
}
3008

    
3009
static void io_mem_init(void)
3010
{
3011
    int i;
3012

    
3013
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
3014
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
3015
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
3016
    for (i=0; i<5; i++)
3017
        io_mem_used[i] = 1;
3018

    
3019
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
3020
                                          watch_mem_write, NULL);
3021
#ifdef CONFIG_KQEMU
3022
    if (kqemu_phys_ram_base) {
3023
        /* alloc dirty bits array */
3024
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3025
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3026
    }
3027
#endif
3028
}
3029

    
3030
/* mem_read and mem_write are arrays of functions containing the
3031
   function to access byte (index 0), word (index 1) and dword (index
3032
   2). Functions can be omitted with a NULL function pointer.
3033
   If io_index is non-zero, the corresponding io zone is
3034
   modified. If it is zero, a new io zone is allocated. The return
3035
   value can be used with cpu_register_physical_memory(); -1 is
3036
   returned on error. */
3037
int cpu_register_io_memory(int io_index,
3038
                           CPUReadMemoryFunc **mem_read,
3039
                           CPUWriteMemoryFunc **mem_write,
3040
                           void *opaque)
3041
{
3042
    int i, subwidth = 0;
3043

    
3044
    if (io_index <= 0) {
3045
        io_index = get_free_io_mem_idx();
3046
        if (io_index == -1)
3047
            return io_index;
3048
    } else {
3049
        if (io_index >= IO_MEM_NB_ENTRIES)
3050
            return -1;
3051
    }
3052

    
3053
    for(i = 0;i < 3; i++) {
3054
        if (!mem_read[i] || !mem_write[i])
3055
            subwidth = IO_MEM_SUBWIDTH;
3056
        io_mem_read[io_index][i] = mem_read[i];
3057
        io_mem_write[io_index][i] = mem_write[i];
3058
    }
3059
    io_mem_opaque[io_index] = opaque;
3060
    return (io_index << IO_MEM_SHIFT) | subwidth;
3061
}
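/* Illustrative usage sketch: a device with byte/word/long callbacks (the
   my_* names are placeholders) registers its MMIO window like this:

       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };

       int io = cpu_register_io_memory(0, my_read, my_write, opaque);
       cpu_register_physical_memory(base, 0x1000, io);

   Passing io_index == 0 allocates a fresh slot; the returned value already
   has the IO_MEM_SHIFT encoding applied, so it can be used directly as a
   phys_offset. */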
3062

    
3063
void cpu_unregister_io_memory(int io_table_address)
3064
{
3065
    int i;
3066
    int io_index = io_table_address >> IO_MEM_SHIFT;
3067

    
3068
    for (i=0;i < 3; i++) {
3069
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3070
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3071
    }
3072
    io_mem_opaque[io_index] = NULL;
3073
    io_mem_used[io_index] = 0;
3074
}
3075

    
3076
#endif /* !defined(CONFIG_USER_ONLY) */
3077

    
3078
/* physical memory access (slow version, mainly for debug) */
3079
#if defined(CONFIG_USER_ONLY)
3080
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3081
                            int len, int is_write)
3082
{
3083
    int l, flags;
3084
    target_ulong page;
3085
    void * p;
3086

    
3087
    while (len > 0) {
3088
        page = addr & TARGET_PAGE_MASK;
3089
        l = (page + TARGET_PAGE_SIZE) - addr;
3090
        if (l > len)
3091
            l = len;
3092
        flags = page_get_flags(page);
3093
        if (!(flags & PAGE_VALID))
3094
            return;
3095
        if (is_write) {
3096
            if (!(flags & PAGE_WRITE))
3097
                return;
3098
            /* XXX: this code should not depend on lock_user */
3099
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3100
                /* FIXME - should this return an error rather than just fail? */
3101
                return;
3102
            memcpy(p, buf, l);
3103
            unlock_user(p, addr, l);
3104
        } else {
3105
            if (!(flags & PAGE_READ))
3106
                return;
3107
            /* XXX: this code should not depend on lock_user */
3108
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3109
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
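
/* Illustrative sketch only (load_option_rom() and the addresses are
   hypothetical, not defined in this file): board or machine init code uses
   this helper to copy a firmware image into a region registered as ROM,
   which the ordinary cpu_physical_memory_rw() write path would not modify:

       static void load_option_rom(const uint8_t *image, int size,
                                   target_phys_addr_t rom_base)
       {
           cpu_physical_memory_write_rom(rom_base, image, size);
       }
*/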

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
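
/* Illustrative sketch of the intended calling convention (MyDevice,
   my_device_retry_dma() and fill_buffer() are hypothetical names, not
   defined in this file).  A DMA-capable device model maps guest memory for
   writing, fills it in place, then unmaps it; when the map fails because
   the single bounce buffer is already in use, the device registers a map
   client and retries after cpu_notify_map_clients() has run:

       static void my_device_dma(MyDevice *s, target_phys_addr_t gpa,
                                 target_phys_addr_t size)
       {
           target_phys_addr_t plen = size;
           void *host = cpu_physical_memory_map(gpa, &plen, 1);
           if (!host) {
               cpu_register_map_client(s, my_device_retry_dma);
               return;
           }
           fill_buffer(host, plen);
           cpu_physical_memory_unmap(host, plen, 1, plen);
       }

   Note that *plen may come back smaller than requested, so real callers
   loop or split the transfer accordingly. */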

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
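
/* Illustrative sketch only (the helper name and PTE layout are
   hypothetical): a target MMU walker that sets accessed/dirty flags in a
   guest page table entry uses the _notdirty variant so that this internal
   bookkeeping store does not itself flag the RAM page as dirty, which
   matters when the dirty bits are being used to detect modified PTEs:

       static void update_pte_flags(target_phys_addr_t pte_addr,
                                    uint32_t pte, uint32_t flags)
       {
           stl_phys_notdirty(pte_addr, pte | flags);
       }
*/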

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
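
/* Illustrative sketch only (env and pc_addr come from the surrounding
   debugger context, which is omitted here): a gdb-stub style caller reads
   guest virtual memory through the current page tables, and a write made
   through this function may also patch ROM, e.g. to plant a breakpoint:

       uint8_t insn[4];
       if (cpu_memory_rw_debug(env, pc_addr, insn, sizeof(insn), 0) < 0) {
           return -1;
       }
*/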

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
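
/* Worked example of the retranslation above: if the original TB contained
   7 guest instructions (tb->icount == 7) and the I/O access was made by
   the 3rd of them, the arithmetic above finds that 2 instructions had
   completed before the fault, n++ then makes it 3, and tb_gen_code() is
   asked for a replacement TB of exactly 3 instructions with CF_LAST_IO
   set, so the I/O instruction re-executes as the last insn of its TB and
   the deterministic instruction count stays exact. */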

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif