root / exec.c @ fad6cb1a

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#include "hw/hw.h"
41
#include "osdep.h"
42
#include "kvm.h"
43
#if defined(CONFIG_USER_ONLY)
44
#include <qemu.h>
45
#endif
46

    
47
//#define DEBUG_TB_INVALIDATE
48
//#define DEBUG_FLUSH
49
//#define DEBUG_TLB
50
//#define DEBUG_UNASSIGNED
51

    
52
/* make various TB consistency checks */
53
//#define DEBUG_TB_CHECK
54
//#define DEBUG_TLB_CHECK
55

    
56
//#define DEBUG_IOPORT
57
//#define DEBUG_SUBPAGE
58

    
59
#if !defined(CONFIG_USER_ONLY)
60
/* TB consistency checks only implemented for usermode emulation.  */
61
#undef DEBUG_TB_CHECK
62
#endif
63

    
64
#define SMC_BITMAP_USE_THRESHOLD 10
65

    
66
#define MMAP_AREA_START        0x00000000
67
#define MMAP_AREA_END          0xa8000000
68

    
69
#if defined(TARGET_SPARC64)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 41
71
#elif defined(TARGET_SPARC)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 36
73
#elif defined(TARGET_ALPHA)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#define TARGET_VIRT_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_PPC64)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 42
80
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81
#define TARGET_PHYS_ADDR_SPACE_BITS 36
82
#else
83
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84
#define TARGET_PHYS_ADDR_SPACE_BITS 32
85
#endif
86

    
87
static TranslationBlock *tbs;
88
int code_gen_max_blocks;
89
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90
static int nb_tbs;
91
/* any access to the tbs or the page table must use this lock */
92
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93

    
94
#if defined(__arm__) || defined(__sparc_v9__)
95
/* The prologue must be reachable with a direct jump. ARM and Sparc64
96
 have limited branch ranges (possibly also PPC) so place it in a
97
 section close to the code segment. */
98
#define code_gen_section                                \
99
    __attribute__((__section__(".gen_code")))           \
100
    __attribute__((aligned (32)))
101
#else
102
#define code_gen_section                                \
103
    __attribute__((aligned (32)))
104
#endif
105

    
106
uint8_t code_gen_prologue[1024] code_gen_section;
107
static uint8_t *code_gen_buffer;
108
static unsigned long code_gen_buffer_size;
109
/* threshold to flush the translated code buffer */
110
static unsigned long code_gen_buffer_max_size;
111
uint8_t *code_gen_ptr;
112

    
113
#if !defined(CONFIG_USER_ONLY)
114
ram_addr_t phys_ram_size;
115
int phys_ram_fd;
116
uint8_t *phys_ram_base;
117
uint8_t *phys_ram_dirty;
118
static int in_migration;
119
static ram_addr_t phys_ram_alloc_offset = 0;
120
#endif
121

    
122
CPUState *first_cpu;
123
/* current CPU in the current thread. It is only valid inside
124
   cpu_exec() */
125
CPUState *cpu_single_env;
126
/* 0 = Do not count executed instructions.
127
   1 = Precise instruction counting.
128
   2 = Adaptive rate instruction counting.  */
129
int use_icount = 0;
130
/* Current instruction counter.  While executing translated code this may
131
   include some instructions that have not yet been executed.  */
132
int64_t qemu_icount;
133

    
134
typedef struct PageDesc {
135
    /* list of TBs intersecting this ram page */
136
    TranslationBlock *first_tb;
137
    /* in order to optimize self-modifying code, we count the number of
138
       write accesses to a given page before switching to a bitmap */
139
    unsigned int code_write_count;
140
    uint8_t *code_bitmap;
141
#if defined(CONFIG_USER_ONLY)
142
    unsigned long flags;
143
#endif
144
} PageDesc;
145

    
146
typedef struct PhysPageDesc {
147
    /* offset in host memory of the page + io_index in the low bits */
148
    ram_addr_t phys_offset;
149
    ram_addr_t region_offset;
150
} PhysPageDesc;
151

    
152
#define L2_BITS 10
153
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154
/* XXX: this is a temporary hack for the alpha target.
155
 *      In the future, this is to be replaced by a multi-level table
156
 *      to actually be able to handle the complete 64-bit address space.
157
 */
158
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159
#else
160
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161
#endif
162

    
163
#define L1_SIZE (1 << L1_BITS)
164
#define L2_SIZE (1 << L2_BITS)
165

    
166
unsigned long qemu_real_host_page_size;
167
unsigned long qemu_host_page_bits;
168
unsigned long qemu_host_page_size;
169
unsigned long qemu_host_page_mask;
170

    
171
/* XXX: for system emulation, it could just be an array */
172
static PageDesc *l1_map[L1_SIZE];
173
static PhysPageDesc **l1_phys_map;
174

    
175
#if !defined(CONFIG_USER_ONLY)
176
static void io_mem_init(void);
177

    
178
/* io memory support */
179
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182
static int io_mem_nb;
183
static int io_mem_watch;
184
#endif
185

    
186
/* log support */
187
static const char *logfilename = "/tmp/qemu.log";
188
FILE *logfile;
189
int loglevel;
190
static int log_append = 0;
191

    
192
/* statistics */
193
static int tlb_flush_count;
194
static int tb_flush_count;
195
static int tb_phys_invalidate_count;
196

    
197
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198
typedef struct subpage_t {
199
    target_phys_addr_t base;
200
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202
    void *opaque[TARGET_PAGE_SIZE][2][4];
203
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204
} subpage_t;
205

    
206
#ifdef _WIN32
207
static void map_exec(void *addr, long size)
208
{
209
    DWORD old_protect;
210
    VirtualProtect(addr, size,
211
                   PAGE_EXECUTE_READWRITE, &old_protect);
212
    
213
}
214
#else
215
static void map_exec(void *addr, long size)
216
{
217
    unsigned long start, end, page_size;
218
    
219
    page_size = getpagesize();
220
    start = (unsigned long)addr;
221
    start &= ~(page_size - 1);
222
    
223
    end = (unsigned long)addr + size;
224
    end += page_size - 1;
225
    end &= ~(page_size - 1);
226
    
227
    mprotect((void *)start, end - start,
228
             PROT_READ | PROT_WRITE | PROT_EXEC);
229
}
230
#endif
231

    
232
static void page_init(void)
233
{
234
    /* NOTE: we can always assume that qemu_host_page_size >=
235
       TARGET_PAGE_SIZE */
236
#ifdef _WIN32
237
    {
238
        SYSTEM_INFO system_info;
239

    
240
        GetSystemInfo(&system_info);
241
        qemu_real_host_page_size = system_info.dwPageSize;
242
    }
243
#else
244
    qemu_real_host_page_size = getpagesize();
245
#endif
246
    if (qemu_host_page_size == 0)
247
        qemu_host_page_size = qemu_real_host_page_size;
248
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
249
        qemu_host_page_size = TARGET_PAGE_SIZE;
250
    qemu_host_page_bits = 0;
251
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252
        qemu_host_page_bits++;
253
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
254
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
256

    
257
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
258
    {
259
        long long startaddr, endaddr;
260
        FILE *f;
261
        int n;
262

    
263
        mmap_lock();
264
        last_brk = (unsigned long)sbrk(0);
265
        f = fopen("/proc/self/maps", "r");
266
        if (f) {
267
            do {
268
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269
                if (n == 2) {
270
                    startaddr = MIN(startaddr,
271
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272
                    endaddr = MIN(endaddr,
273
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
275
                                   TARGET_PAGE_ALIGN(endaddr),
276
                                   PAGE_RESERVED); 
277
                }
278
            } while (!feof(f));
279
            fclose(f);
280
        }
281
        mmap_unlock();
282
    }
283
#endif
284
}
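
/* The PageDesc lookup below is a two-level table: page_l1_map() uses the
   high L1_BITS of a target page index to select an l1_map slot, and
   page_find()/page_find_alloc() use the low L2_BITS to index the PageDesc
   array attached to that slot.  For example, a 32-bit target with 4 KB
   pages gives L1_BITS = L2_BITS = 10, i.e. up to 1024 arrays of 1024
   PageDesc entries each. */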
285

    
286
static inline PageDesc **page_l1_map(target_ulong index)
287
{
288
#if TARGET_LONG_BITS > 32
289
    /* Host memory outside guest VM.  For 32-bit targets we have already
290
       excluded high addresses.  */
291
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292
        return NULL;
293
#endif
294
    return &l1_map[index >> L2_BITS];
295
}
296

    
297
static inline PageDesc *page_find_alloc(target_ulong index)
298
{
299
    PageDesc **lp, *p;
300
    lp = page_l1_map(index);
301
    if (!lp)
302
        return NULL;
303

    
304
    p = *lp;
305
    if (!p) {
306
        /* allocate if not found */
307
#if defined(CONFIG_USER_ONLY)
308
        size_t len = sizeof(PageDesc) * L2_SIZE;
309
        /* Don't use qemu_malloc because it may recurse.  */
310
        p = mmap(0, len, PROT_READ | PROT_WRITE,
311
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312
        *lp = p;
313
        if (h2g_valid(p)) {
314
            unsigned long addr = h2g(p);
315
            page_set_flags(addr & TARGET_PAGE_MASK,
316
                           TARGET_PAGE_ALIGN(addr + len),
317
                           PAGE_RESERVED); 
318
        }
319
#else
320
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321
        *lp = p;
322
#endif
323
    }
324
    return p + (index & (L2_SIZE - 1));
325
}
326

    
327
static inline PageDesc *page_find(target_ulong index)
328
{
329
    PageDesc **lp, *p;
330
    lp = page_l1_map(index);
331
    if (!lp)
332
        return NULL;
333

    
334
    p = *lp;
335
    if (!p)
336
        return 0;
337
    return p + (index & (L2_SIZE - 1));
338
}
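
/* The physical page table uses the same split.  phys_page_find_alloc()
   below inserts one extra indirection level above the L1/L2 lookup when
   TARGET_PHYS_ADDR_SPACE_BITS > 32; a freshly allocated L2 array has every
   phys_offset preset to IO_MEM_UNASSIGNED, so physical pages that were
   never registered are reported as unassigned memory. */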
339

    
340
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
341
{
342
    void **lp, **p;
343
    PhysPageDesc *pd;
344

    
345
    p = (void **)l1_phys_map;
346
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347

    
348
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350
#endif
351
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352
    p = *lp;
353
    if (!p) {
354
        /* allocate if not found */
355
        if (!alloc)
356
            return NULL;
357
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358
        memset(p, 0, sizeof(void *) * L1_SIZE);
359
        *lp = p;
360
    }
361
#endif
362
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363
    pd = *lp;
364
    if (!pd) {
365
        int i;
366
        /* allocate if not found */
367
        if (!alloc)
368
            return NULL;
369
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370
        *lp = pd;
371
        for (i = 0; i < L2_SIZE; i++)
372
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
373
    }
374
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
375
}
376

    
377
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
378
{
379
    return phys_page_find_alloc(index, 0);
380
}
381

    
382
#if !defined(CONFIG_USER_ONLY)
383
static void tlb_protect_code(ram_addr_t ram_addr);
384
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
385
                                    target_ulong vaddr);
386
#define mmap_lock() do { } while(0)
387
#define mmap_unlock() do { } while(0)
388
#endif
389

    
390
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391

    
392
#if defined(CONFIG_USER_ONLY)
393
/* Currently it is not recommended to allocate big chunks of data in
394
   user mode. This will change when a dedicated libc is used. */
395
#define USE_STATIC_CODE_GEN_BUFFER
396
#endif
397

    
398
#ifdef USE_STATIC_CODE_GEN_BUFFER
399
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
400
#endif
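
/* code_gen_alloc() below either reuses the static buffer
   (USE_STATIC_CODE_GEN_BUFFER, the user-mode default) or sizes a dynamic
   one: DEFAULT_CODE_GEN_BUFFER_SIZE when no tb_size is given in user mode,
   phys_ram_size / 4 in system mode, clamped by host-specific constraints
   (MAP_32BIT and an 800 MB cap on x86-64 Linux, low fixed mappings on
   sparc64 and arm) so generated code can be reached with direct calls and
   branches.  It also makes code_gen_prologue executable and allocates the
   tbs[] array (code_gen_max_blocks entries). */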
401

    
402
static void code_gen_alloc(unsigned long tb_size)
403
{
404
#ifdef USE_STATIC_CODE_GEN_BUFFER
405
    code_gen_buffer = static_code_gen_buffer;
406
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
407
    map_exec(code_gen_buffer, code_gen_buffer_size);
408
#else
409
    code_gen_buffer_size = tb_size;
410
    if (code_gen_buffer_size == 0) {
411
#if defined(CONFIG_USER_ONLY)
412
        /* in user mode, phys_ram_size is not meaningful */
413
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
414
#else
415
        /* XXX: needs adjustments */
416
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
417
#endif
418
    }
419
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
420
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
421
    /* The code gen buffer location may have constraints depending on
422
       the host cpu and OS */
423
#if defined(__linux__) 
424
    {
425
        int flags;
426
        void *start = NULL;
427

    
428
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
429
#if defined(__x86_64__)
430
        flags |= MAP_32BIT;
431
        /* Cannot map more than that */
432
        if (code_gen_buffer_size > (800 * 1024 * 1024))
433
            code_gen_buffer_size = (800 * 1024 * 1024);
434
#elif defined(__sparc_v9__)
435
        // Map the buffer below 2G, so we can use direct calls and branches
436
        flags |= MAP_FIXED;
437
        start = (void *) 0x60000000UL;
438
        if (code_gen_buffer_size > (512 * 1024 * 1024))
439
            code_gen_buffer_size = (512 * 1024 * 1024);
440
#elif defined(__arm__)
441
        /* Map the buffer below 32M, so we can use direct calls and branches */
442
        flags |= MAP_FIXED;
443
        start = (void *) 0x01000000UL;
444
        if (code_gen_buffer_size > 16 * 1024 * 1024)
445
            code_gen_buffer_size = 16 * 1024 * 1024;
446
#endif
447
        code_gen_buffer = mmap(start, code_gen_buffer_size,
448
                               PROT_WRITE | PROT_READ | PROT_EXEC,
449
                               flags, -1, 0);
450
        if (code_gen_buffer == MAP_FAILED) {
451
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
452
            exit(1);
453
        }
454
    }
455
#elif defined(__FreeBSD__)
456
    {
457
        int flags;
458
        void *addr = NULL;
459
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
460
#if defined(__x86_64__)
461
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
462
         * 0x40000000 is free */
463
        flags |= MAP_FIXED;
464
        addr = (void *)0x40000000;
465
        /* Cannot map more than that */
466
        if (code_gen_buffer_size > (800 * 1024 * 1024))
467
            code_gen_buffer_size = (800 * 1024 * 1024);
468
#endif
469
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
470
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
471
                               flags, -1, 0);
472
        if (code_gen_buffer == MAP_FAILED) {
473
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474
            exit(1);
475
        }
476
    }
477
#else
478
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
479
    if (!code_gen_buffer) {
480
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
481
        exit(1);
482
    }
483
    map_exec(code_gen_buffer, code_gen_buffer_size);
484
#endif
485
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
486
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
487
    code_gen_buffer_max_size = code_gen_buffer_size - 
488
        code_gen_max_block_size();
489
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
490
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
491
}
492

    
493
/* Must be called before using the QEMU cpus. 'tb_size' is the size
494
   (in bytes) allocated to the translation buffer. Zero means default
495
   size. */
496
void cpu_exec_init_all(unsigned long tb_size)
497
{
498
    cpu_gen_init();
499
    code_gen_alloc(tb_size);
500
    code_gen_ptr = code_gen_buffer;
501
    page_init();
502
#if !defined(CONFIG_USER_ONLY)
503
    io_mem_init();
504
#endif
505
}
506

    
507
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
508

    
509
#define CPU_COMMON_SAVE_VERSION 1
510

    
511
static void cpu_common_save(QEMUFile *f, void *opaque)
512
{
513
    CPUState *env = opaque;
514

    
515
    qemu_put_be32s(f, &env->halted);
516
    qemu_put_be32s(f, &env->interrupt_request);
517
}
518

    
519
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
520
{
521
    CPUState *env = opaque;
522

    
523
    if (version_id != CPU_COMMON_SAVE_VERSION)
524
        return -EINVAL;
525

    
526
    qemu_get_be32s(f, &env->halted);
527
    qemu_get_be32s(f, &env->interrupt_request);
528
    tlb_flush(env, 1);
529

    
530
    return 0;
531
}
532
#endif
533

    
534
void cpu_exec_init(CPUState *env)
535
{
536
    CPUState **penv;
537
    int cpu_index;
538

    
539
    env->next_cpu = NULL;
540
    penv = &first_cpu;
541
    cpu_index = 0;
542
    while (*penv != NULL) {
543
        penv = (CPUState **)&(*penv)->next_cpu;
544
        cpu_index++;
545
    }
546
    env->cpu_index = cpu_index;
547
    TAILQ_INIT(&env->breakpoints);
548
    TAILQ_INIT(&env->watchpoints);
549
    *penv = env;
550
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
551
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
552
                    cpu_common_save, cpu_common_load, env);
553
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
554
                    cpu_save, cpu_load, env);
555
#endif
556
}
557

    
558
static inline void invalidate_page_bitmap(PageDesc *p)
559
{
560
    if (p->code_bitmap) {
561
        qemu_free(p->code_bitmap);
562
        p->code_bitmap = NULL;
563
    }
564
    p->code_write_count = 0;
565
}
566

    
567
/* set to NULL all the 'first_tb' fields in all PageDescs */
568
static void page_flush_tb(void)
569
{
570
    int i, j;
571
    PageDesc *p;
572

    
573
    for(i = 0; i < L1_SIZE; i++) {
574
        p = l1_map[i];
575
        if (p) {
576
            for(j = 0; j < L2_SIZE; j++) {
577
                p->first_tb = NULL;
578
                invalidate_page_bitmap(p);
579
                p++;
580
            }
581
        }
582
    }
583
}
584

    
585
/* flush all the translation blocks */
586
/* XXX: tb_flush is currently not thread safe */
587
void tb_flush(CPUState *env1)
588
{
589
    CPUState *env;
590
#if defined(DEBUG_FLUSH)
591
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
592
           (unsigned long)(code_gen_ptr - code_gen_buffer),
593
           nb_tbs, nb_tbs > 0 ?
594
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
595
#endif
596
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
597
        cpu_abort(env1, "Internal error: code buffer overflow\n");
598

    
599
    nb_tbs = 0;
600

    
601
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
602
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
603
    }
604

    
605
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
606
    page_flush_tb();
607

    
608
    code_gen_ptr = code_gen_buffer;
609
    /* XXX: flush processor icache at this point if cache flush is
610
       expensive */
611
    tb_flush_count++;
612
}
613

    
614
#ifdef DEBUG_TB_CHECK
615

    
616
static void tb_invalidate_check(target_ulong address)
617
{
618
    TranslationBlock *tb;
619
    int i;
620
    address &= TARGET_PAGE_MASK;
621
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
622
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
623
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
624
                  address >= tb->pc + tb->size)) {
625
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
626
                       address, (long)tb->pc, tb->size);
627
            }
628
        }
629
    }
630
}
631

    
632
/* verify that all the pages have correct rights for code */
633
static void tb_page_check(void)
634
{
635
    TranslationBlock *tb;
636
    int i, flags1, flags2;
637

    
638
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
639
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
640
            flags1 = page_get_flags(tb->pc);
641
            flags2 = page_get_flags(tb->pc + tb->size - 1);
642
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
643
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
644
                       (long)tb->pc, tb->size, flags1, flags2);
645
            }
646
        }
647
    }
648
}
649

    
650
static void tb_jmp_check(TranslationBlock *tb)
651
{
652
    TranslationBlock *tb1;
653
    unsigned int n1;
654

    
655
    /* suppress any remaining jumps to this TB */
656
    tb1 = tb->jmp_first;
657
    for(;;) {
658
        n1 = (long)tb1 & 3;
659
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
660
        if (n1 == 2)
661
            break;
662
        tb1 = tb1->jmp_next[n1];
663
    }
664
    /* check end of list */
665
    if (tb1 != tb) {
666
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
667
    }
668
}
669

    
670
#endif
671

    
672
/* invalidate one TB */
673
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
674
                             int next_offset)
675
{
676
    TranslationBlock *tb1;
677
    for(;;) {
678
        tb1 = *ptb;
679
        if (tb1 == tb) {
680
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
681
            break;
682
        }
683
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
684
    }
685
}
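
/* The list helpers below rely on a 2-bit tag stored in the low bits of
   each pointer: in the per-page lists (first_tb/page_next) the tag says
   which of the TB's two pages the link belongs to, and in the jump lists
   (jmp_first/jmp_next) tags 0 and 1 name the jump slot of the referencing
   TB while tag 2 marks the list head (the TB itself) and terminates
   traversal. */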
686

    
687
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
688
{
689
    TranslationBlock *tb1;
690
    unsigned int n1;
691

    
692
    for(;;) {
693
        tb1 = *ptb;
694
        n1 = (long)tb1 & 3;
695
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
696
        if (tb1 == tb) {
697
            *ptb = tb1->page_next[n1];
698
            break;
699
        }
700
        ptb = &tb1->page_next[n1];
701
    }
702
}
703

    
704
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
705
{
706
    TranslationBlock *tb1, **ptb;
707
    unsigned int n1;
708

    
709
    ptb = &tb->jmp_next[n];
710
    tb1 = *ptb;
711
    if (tb1) {
712
        /* find tb(n) in circular list */
713
        for(;;) {
714
            tb1 = *ptb;
715
            n1 = (long)tb1 & 3;
716
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
717
            if (n1 == n && tb1 == tb)
718
                break;
719
            if (n1 == 2) {
720
                ptb = &tb1->jmp_first;
721
            } else {
722
                ptb = &tb1->jmp_next[n1];
723
            }
724
        }
725
        /* now we can suppress tb(n) from the list */
726
        *ptb = tb->jmp_next[n];
727

    
728
        tb->jmp_next[n] = NULL;
729
    }
730
}
731

    
732
/* reset the jump entry 'n' of a TB so that it is not chained to
733
   another TB */
734
static inline void tb_reset_jump(TranslationBlock *tb, int n)
735
{
736
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
737
}
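
/* tb_phys_invalidate() below unlinks a TB completely: it is removed from
   the physical hash table, from the PageDesc list of each page it covers,
   and from every CPU's tb_jmp_cache; its two outgoing jumps are unchained
   and any TB still jumping into it is patched back to the unchained (slow)
   path via tb_reset_jump(). */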
738

    
739
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
740
{
741
    CPUState *env;
742
    PageDesc *p;
743
    unsigned int h, n1;
744
    target_phys_addr_t phys_pc;
745
    TranslationBlock *tb1, *tb2;
746

    
747
    /* remove the TB from the hash list */
748
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
749
    h = tb_phys_hash_func(phys_pc);
750
    tb_remove(&tb_phys_hash[h], tb,
751
              offsetof(TranslationBlock, phys_hash_next));
752

    
753
    /* remove the TB from the page list */
754
    if (tb->page_addr[0] != page_addr) {
755
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
756
        tb_page_remove(&p->first_tb, tb);
757
        invalidate_page_bitmap(p);
758
    }
759
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
760
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
761
        tb_page_remove(&p->first_tb, tb);
762
        invalidate_page_bitmap(p);
763
    }
764

    
765
    tb_invalidated_flag = 1;
766

    
767
    /* remove the TB from each CPU's tb_jmp_cache */
768
    h = tb_jmp_cache_hash_func(tb->pc);
769
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
770
        if (env->tb_jmp_cache[h] == tb)
771
            env->tb_jmp_cache[h] = NULL;
772
    }
773

    
774
    /* suppress this TB from the two jump lists */
775
    tb_jmp_remove(tb, 0);
776
    tb_jmp_remove(tb, 1);
777

    
778
    /* suppress any remaining jumps to this TB */
779
    tb1 = tb->jmp_first;
780
    for(;;) {
781
        n1 = (long)tb1 & 3;
782
        if (n1 == 2)
783
            break;
784
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
785
        tb2 = tb1->jmp_next[n1];
786
        tb_reset_jump(tb1, n1);
787
        tb1->jmp_next[n1] = NULL;
788
        tb1 = tb2;
789
    }
790
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
791

    
792
    tb_phys_invalidate_count++;
793
}
794

    
795
static inline void set_bits(uint8_t *tab, int start, int len)
796
{
797
    int end, mask, end1;
798

    
799
    end = start + len;
800
    tab += start >> 3;
801
    mask = 0xff << (start & 7);
802
    if ((start & ~7) == (end & ~7)) {
803
        if (start < end) {
804
            mask &= ~(0xff << (end & 7));
805
            *tab |= mask;
806
        }
807
    } else {
808
        *tab++ |= mask;
809
        start = (start + 8) & ~7;
810
        end1 = end & ~7;
811
        while (start < end1) {
812
            *tab++ = 0xff;
813
            start += 8;
814
        }
815
        if (start < end) {
816
            mask = ~(0xff << (end & 7));
817
            *tab |= mask;
818
        }
819
    }
820
}
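
/* Example: set_bits(tab, 3, 7) sets bits 3..9, i.e. tab[0] |= 0xf8 and
   tab[1] |= 0x03.  build_page_bitmap() below uses it to mark, one bit per
   byte of the page, the bytes covered by translated code. */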
821

    
822
static void build_page_bitmap(PageDesc *p)
823
{
824
    int n, tb_start, tb_end;
825
    TranslationBlock *tb;
826

    
827
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
828
    if (!p->code_bitmap)
829
        return;
830

    
831
    tb = p->first_tb;
832
    while (tb != NULL) {
833
        n = (long)tb & 3;
834
        tb = (TranslationBlock *)((long)tb & ~3);
835
        /* NOTE: this is subtle as a TB may span two physical pages */
836
        if (n == 0) {
837
            /* NOTE: tb_end may be after the end of the page, but
838
               it is not a problem */
839
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
840
            tb_end = tb_start + tb->size;
841
            if (tb_end > TARGET_PAGE_SIZE)
842
                tb_end = TARGET_PAGE_SIZE;
843
        } else {
844
            tb_start = 0;
845
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
846
        }
847
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
848
        tb = tb->page_next[n];
849
    }
850
}
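
/* tb_gen_code() drives a single translation: it allocates a TB (flushing
   everything via tb_flush() if the TB array or the code buffer is full),
   emits host code at code_gen_ptr through cpu_gen_code(), advances
   code_gen_ptr past the generated code aligned to CODE_GEN_ALIGN, and
   finally links the TB to the one or two physical pages it covers with
   tb_link_phys(). */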
851

    
852
TranslationBlock *tb_gen_code(CPUState *env,
853
                              target_ulong pc, target_ulong cs_base,
854
                              int flags, int cflags)
855
{
856
    TranslationBlock *tb;
857
    uint8_t *tc_ptr;
858
    target_ulong phys_pc, phys_page2, virt_page2;
859
    int code_gen_size;
860

    
861
    phys_pc = get_phys_addr_code(env, pc);
862
    tb = tb_alloc(pc);
863
    if (!tb) {
864
        /* flush must be done */
865
        tb_flush(env);
866
        /* cannot fail at this point */
867
        tb = tb_alloc(pc);
868
        /* Don't forget to invalidate previous TB info.  */
869
        tb_invalidated_flag = 1;
870
    }
871
    tc_ptr = code_gen_ptr;
872
    tb->tc_ptr = tc_ptr;
873
    tb->cs_base = cs_base;
874
    tb->flags = flags;
875
    tb->cflags = cflags;
876
    cpu_gen_code(env, tb, &code_gen_size);
877
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
878

    
879
    /* check next page if needed */
880
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
881
    phys_page2 = -1;
882
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
883
        phys_page2 = get_phys_addr_code(env, virt_page2);
884
    }
885
    tb_link_phys(tb, phys_pc, phys_page2);
886
    return tb;
887
}
888

    
889
/* invalidate all TBs which intersect with the target physical page
890
   starting in range [start;end[. NOTE: start and end must refer to
891
   the same physical page. 'is_cpu_write_access' should be true if called
892
   from a real cpu write access: the virtual CPU will exit the current
893
   TB if code is modified inside this TB. */
894
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
895
                                   int is_cpu_write_access)
896
{
897
    TranslationBlock *tb, *tb_next, *saved_tb;
898
    CPUState *env = cpu_single_env;
899
    target_ulong tb_start, tb_end;
900
    PageDesc *p;
901
    int n;
902
#ifdef TARGET_HAS_PRECISE_SMC
903
    int current_tb_not_found = is_cpu_write_access;
904
    TranslationBlock *current_tb = NULL;
905
    int current_tb_modified = 0;
906
    target_ulong current_pc = 0;
907
    target_ulong current_cs_base = 0;
908
    int current_flags = 0;
909
#endif /* TARGET_HAS_PRECISE_SMC */
910

    
911
    p = page_find(start >> TARGET_PAGE_BITS);
912
    if (!p)
913
        return;
914
    if (!p->code_bitmap &&
915
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
916
        is_cpu_write_access) {
917
        /* build code bitmap */
918
        build_page_bitmap(p);
919
    }
920

    
921
    /* we remove all the TBs in the range [start, end) */
922
    /* XXX: see if in some cases it could be faster to invalidate all the code */
923
    tb = p->first_tb;
924
    while (tb != NULL) {
925
        n = (long)tb & 3;
926
        tb = (TranslationBlock *)((long)tb & ~3);
927
        tb_next = tb->page_next[n];
928
        /* NOTE: this is subtle as a TB may span two physical pages */
929
        if (n == 0) {
930
            /* NOTE: tb_end may be after the end of the page, but
931
               it is not a problem */
932
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
933
            tb_end = tb_start + tb->size;
934
        } else {
935
            tb_start = tb->page_addr[1];
936
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
937
        }
938
        if (!(tb_end <= start || tb_start >= end)) {
939
#ifdef TARGET_HAS_PRECISE_SMC
940
            if (current_tb_not_found) {
941
                current_tb_not_found = 0;
942
                current_tb = NULL;
943
                if (env->mem_io_pc) {
944
                    /* now we have a real cpu fault */
945
                    current_tb = tb_find_pc(env->mem_io_pc);
946
                }
947
            }
948
            if (current_tb == tb &&
949
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
950
                /* If we are modifying the current TB, we must stop
951
                its execution. We could be more precise by checking
952
                that the modification is after the current PC, but it
953
                would require a specialized function to partially
954
                restore the CPU state */
955

    
956
                current_tb_modified = 1;
957
                cpu_restore_state(current_tb, env,
958
                                  env->mem_io_pc, NULL);
959
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
960
                                     &current_flags);
961
            }
962
#endif /* TARGET_HAS_PRECISE_SMC */
963
            /* we need to do that to handle the case where a signal
964
               occurs while doing tb_phys_invalidate() */
965
            saved_tb = NULL;
966
            if (env) {
967
                saved_tb = env->current_tb;
968
                env->current_tb = NULL;
969
            }
970
            tb_phys_invalidate(tb, -1);
971
            if (env) {
972
                env->current_tb = saved_tb;
973
                if (env->interrupt_request && env->current_tb)
974
                    cpu_interrupt(env, env->interrupt_request);
975
            }
976
        }
977
        tb = tb_next;
978
    }
979
#if !defined(CONFIG_USER_ONLY)
980
    /* if no code remaining, no need to continue to use slow writes */
981
    if (!p->first_tb) {
982
        invalidate_page_bitmap(p);
983
        if (is_cpu_write_access) {
984
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
985
        }
986
    }
987
#endif
988
#ifdef TARGET_HAS_PRECISE_SMC
989
    if (current_tb_modified) {
990
        /* we generate a block containing just the instruction
991
           modifying the memory. It will ensure that it cannot modify
992
           itself */
993
        env->current_tb = NULL;
994
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
995
        cpu_resume_from_signal(env, NULL);
996
    }
997
#endif
998
}
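
/* tb_invalidate_phys_page_fast() below is the fast path for small aligned
   writes: when a code bitmap exists for the page, only the bits covering
   [start, start + len) are tested, and the full
   tb_invalidate_phys_page_range() call is skipped when the written bytes
   do not overlap any translated code. */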
999

    
1000
/* len must be <= 8 and start must be a multiple of len */
1001
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1002
{
1003
    PageDesc *p;
1004
    int offset, b;
1005
#if 0
1006
    if (1) {
1007
        if (loglevel) {
1008
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1009
                   cpu_single_env->mem_io_vaddr, len,
1010
                   cpu_single_env->eip,
1011
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1012
        }
1013
    }
1014
#endif
1015
    p = page_find(start >> TARGET_PAGE_BITS);
1016
    if (!p)
1017
        return;
1018
    if (p->code_bitmap) {
1019
        offset = start & ~TARGET_PAGE_MASK;
1020
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1021
        if (b & ((1 << len) - 1))
1022
            goto do_invalidate;
1023
    } else {
1024
    do_invalidate:
1025
        tb_invalidate_phys_page_range(start, start + len, 1);
1026
    }
1027
}
1028

    
1029
#if !defined(CONFIG_SOFTMMU)
1030
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1031
                                    unsigned long pc, void *puc)
1032
{
1033
    TranslationBlock *tb;
1034
    PageDesc *p;
1035
    int n;
1036
#ifdef TARGET_HAS_PRECISE_SMC
1037
    TranslationBlock *current_tb = NULL;
1038
    CPUState *env = cpu_single_env;
1039
    int current_tb_modified = 0;
1040
    target_ulong current_pc = 0;
1041
    target_ulong current_cs_base = 0;
1042
    int current_flags = 0;
1043
#endif
1044

    
1045
    addr &= TARGET_PAGE_MASK;
1046
    p = page_find(addr >> TARGET_PAGE_BITS);
1047
    if (!p)
1048
        return;
1049
    tb = p->first_tb;
1050
#ifdef TARGET_HAS_PRECISE_SMC
1051
    if (tb && pc != 0) {
1052
        current_tb = tb_find_pc(pc);
1053
    }
1054
#endif
1055
    while (tb != NULL) {
1056
        n = (long)tb & 3;
1057
        tb = (TranslationBlock *)((long)tb & ~3);
1058
#ifdef TARGET_HAS_PRECISE_SMC
1059
        if (current_tb == tb &&
1060
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1061
                /* If we are modifying the current TB, we must stop
1062
                   its execution. We could be more precise by checking
1063
                   that the modification is after the current PC, but it
1064
                   would require a specialized function to partially
1065
                   restore the CPU state */
1066

    
1067
            current_tb_modified = 1;
1068
            cpu_restore_state(current_tb, env, pc, puc);
1069
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1070
                                 &current_flags);
1071
        }
1072
#endif /* TARGET_HAS_PRECISE_SMC */
1073
        tb_phys_invalidate(tb, addr);
1074
        tb = tb->page_next[n];
1075
    }
1076
    p->first_tb = NULL;
1077
#ifdef TARGET_HAS_PRECISE_SMC
1078
    if (current_tb_modified) {
1079
        /* we generate a block containing just the instruction
1080
           modifying the memory. It will ensure that it cannot modify
1081
           itself */
1082
        env->current_tb = NULL;
1083
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1084
        cpu_resume_from_signal(env, puc);
1085
    }
1086
#endif
1087
}
1088
#endif
1089

    
1090
/* add the tb to the target page and protect it if necessary */
1091
static inline void tb_alloc_page(TranslationBlock *tb,
1092
                                 unsigned int n, target_ulong page_addr)
1093
{
1094
    PageDesc *p;
1095
    TranslationBlock *last_first_tb;
1096

    
1097
    tb->page_addr[n] = page_addr;
1098
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1099
    tb->page_next[n] = p->first_tb;
1100
    last_first_tb = p->first_tb;
1101
    p->first_tb = (TranslationBlock *)((long)tb | n);
1102
    invalidate_page_bitmap(p);
1103

    
1104
#if defined(TARGET_HAS_SMC) || 1
1105

    
1106
#if defined(CONFIG_USER_ONLY)
1107
    if (p->flags & PAGE_WRITE) {
1108
        target_ulong addr;
1109
        PageDesc *p2;
1110
        int prot;
1111

    
1112
        /* force the host page to be non-writable (writes will incur a
1113
           page fault + mprotect overhead) */
1114
        page_addr &= qemu_host_page_mask;
1115
        prot = 0;
1116
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1117
            addr += TARGET_PAGE_SIZE) {
1118

    
1119
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1120
            if (!p2)
1121
                continue;
1122
            prot |= p2->flags;
1123
            p2->flags &= ~PAGE_WRITE;
1124
            page_get_flags(addr);
1125
        }
1126
        mprotect(g2h(page_addr), qemu_host_page_size,
1127
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1128
#ifdef DEBUG_TB_INVALIDATE
1129
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1130
               page_addr);
1131
#endif
1132
    }
1133
#else
1134
    /* if some code is already present, then the pages are already
1135
       protected. So we handle the case where only the first TB is
1136
       allocated in a physical page */
1137
    if (!last_first_tb) {
1138
        tlb_protect_code(page_addr);
1139
    }
1140
#endif
1141

    
1142
#endif /* TARGET_HAS_SMC */
1143
}
1144

    
1145
/* Allocate a new translation block. Flush the translation buffer if
1146
   too many translation blocks or too much generated code. */
1147
TranslationBlock *tb_alloc(target_ulong pc)
1148
{
1149
    TranslationBlock *tb;
1150

    
1151
    if (nb_tbs >= code_gen_max_blocks ||
1152
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1153
        return NULL;
1154
    tb = &tbs[nb_tbs++];
1155
    tb->pc = pc;
1156
    tb->cflags = 0;
1157
    return tb;
1158
}
1159

    
1160
void tb_free(TranslationBlock *tb)
1161
{
1162
    /* In practice this is mostly used for single-use temporary TBs.
1163
       Ignore the hard cases and just back up if this TB happens to
1164
       be the last one generated.  */
1165
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1166
        code_gen_ptr = tb->tc_ptr;
1167
        nb_tbs--;
1168
    }
1169
}
1170

    
1171
/* add a new TB and link it to the physical page tables. phys_page2 is
1172
   (-1) to indicate that only one page contains the TB. */
1173
void tb_link_phys(TranslationBlock *tb,
1174
                  target_ulong phys_pc, target_ulong phys_page2)
1175
{
1176
    unsigned int h;
1177
    TranslationBlock **ptb;
1178

    
1179
    /* Grab the mmap lock to stop another thread invalidating this TB
1180
       before we are done.  */
1181
    mmap_lock();
1182
    /* add in the physical hash table */
1183
    h = tb_phys_hash_func(phys_pc);
1184
    ptb = &tb_phys_hash[h];
1185
    tb->phys_hash_next = *ptb;
1186
    *ptb = tb;
1187

    
1188
    /* add in the page list */
1189
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1190
    if (phys_page2 != -1)
1191
        tb_alloc_page(tb, 1, phys_page2);
1192
    else
1193
        tb->page_addr[1] = -1;
1194

    
1195
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1196
    tb->jmp_next[0] = NULL;
1197
    tb->jmp_next[1] = NULL;
1198

    
1199
    /* init original jump addresses */
1200
    if (tb->tb_next_offset[0] != 0xffff)
1201
        tb_reset_jump(tb, 0);
1202
    if (tb->tb_next_offset[1] != 0xffff)
1203
        tb_reset_jump(tb, 1);
1204

    
1205
#ifdef DEBUG_TB_CHECK
1206
    tb_page_check();
1207
#endif
1208
    mmap_unlock();
1209
}
1210

    
1211
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1212
   tb[1].tc_ptr. Return NULL if not found */
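/* The binary search relies on tbs[] being filled in allocation order and
   on code_gen_ptr only moving forward between flushes (tb_free() only
   backs up together with the last TB), so tc_ptr values are monotonically
   increasing. */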
1213
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1214
{
1215
    int m_min, m_max, m;
1216
    unsigned long v;
1217
    TranslationBlock *tb;
1218

    
1219
    if (nb_tbs <= 0)
1220
        return NULL;
1221
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1222
        tc_ptr >= (unsigned long)code_gen_ptr)
1223
        return NULL;
1224
    /* binary search (cf Knuth) */
1225
    m_min = 0;
1226
    m_max = nb_tbs - 1;
1227
    while (m_min <= m_max) {
1228
        m = (m_min + m_max) >> 1;
1229
        tb = &tbs[m];
1230
        v = (unsigned long)tb->tc_ptr;
1231
        if (v == tc_ptr)
1232
            return tb;
1233
        else if (tc_ptr < v) {
1234
            m_max = m - 1;
1235
        } else {
1236
            m_min = m + 1;
1237
        }
1238
    }
1239
    return &tbs[m_max];
1240
}
1241

    
1242
static void tb_reset_jump_recursive(TranslationBlock *tb);
1243

    
1244
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1245
{
1246
    TranslationBlock *tb1, *tb_next, **ptb;
1247
    unsigned int n1;
1248

    
1249
    tb1 = tb->jmp_next[n];
1250
    if (tb1 != NULL) {
1251
        /* find head of list */
1252
        for(;;) {
1253
            n1 = (long)tb1 & 3;
1254
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1255
            if (n1 == 2)
1256
                break;
1257
            tb1 = tb1->jmp_next[n1];
1258
        }
1259
        /* we are now sure that tb jumps to tb1 */
1260
        tb_next = tb1;
1261

    
1262
        /* remove tb from the jmp_first list */
1263
        ptb = &tb_next->jmp_first;
1264
        for(;;) {
1265
            tb1 = *ptb;
1266
            n1 = (long)tb1 & 3;
1267
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1268
            if (n1 == n && tb1 == tb)
1269
                break;
1270
            ptb = &tb1->jmp_next[n1];
1271
        }
1272
        *ptb = tb->jmp_next[n];
1273
        tb->jmp_next[n] = NULL;
1274

    
1275
        /* suppress the jump to next tb in generated code */
1276
        tb_reset_jump(tb, n);
1277

    
1278
        /* suppress jumps in the tb we could have jumped to */
1279
        tb_reset_jump_recursive(tb_next);
1280
    }
1281
}
1282

    
1283
static void tb_reset_jump_recursive(TranslationBlock *tb)
1284
{
1285
    tb_reset_jump_recursive2(tb, 0);
1286
    tb_reset_jump_recursive2(tb, 1);
1287
}
1288

    
1289
#if defined(TARGET_HAS_ICE)
1290
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1291
{
1292
    target_phys_addr_t addr;
1293
    target_ulong pd;
1294
    ram_addr_t ram_addr;
1295
    PhysPageDesc *p;
1296

    
1297
    addr = cpu_get_phys_page_debug(env, pc);
1298
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1299
    if (!p) {
1300
        pd = IO_MEM_UNASSIGNED;
1301
    } else {
1302
        pd = p->phys_offset;
1303
    }
1304
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1305
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1306
}
1307
#endif
1308

    
1309
/* Add a watchpoint.  */
1310
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1311
                          int flags, CPUWatchpoint **watchpoint)
1312
{
1313
    target_ulong len_mask = ~(len - 1);
1314
    CPUWatchpoint *wp;
1315

    
1316
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1317
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1318
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1319
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1320
        return -EINVAL;
1321
    }
1322
    wp = qemu_malloc(sizeof(*wp));
1323
    if (!wp)
1324
        return -ENOMEM;
1325

    
1326
    wp->vaddr = addr;
1327
    wp->len_mask = len_mask;
1328
    wp->flags = flags;
1329

    
1330
    /* keep all GDB-injected watchpoints in front */
1331
    if (flags & BP_GDB)
1332
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1333
    else
1334
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1335

    
1336
    tlb_flush_page(env, addr);
1337

    
1338
    if (watchpoint)
1339
        *watchpoint = wp;
1340
    return 0;
1341
}
1342

    
1343
/* Remove a specific watchpoint.  */
1344
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1345
                          int flags)
1346
{
1347
    target_ulong len_mask = ~(len - 1);
1348
    CPUWatchpoint *wp;
1349

    
1350
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1351
        if (addr == wp->vaddr && len_mask == wp->len_mask
1352
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1353
            cpu_watchpoint_remove_by_ref(env, wp);
1354
            return 0;
1355
        }
1356
    }
1357
    return -ENOENT;
1358
}
1359

    
1360
/* Remove a specific watchpoint by reference.  */
1361
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1362
{
1363
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1364

    
1365
    tlb_flush_page(env, watchpoint->vaddr);
1366

    
1367
    qemu_free(watchpoint);
1368
}
1369

    
1370
/* Remove all matching watchpoints.  */
1371
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1372
{
1373
    CPUWatchpoint *wp, *next;
1374

    
1375
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1376
        if (wp->flags & mask)
1377
            cpu_watchpoint_remove_by_ref(env, wp);
1378
    }
1379
}
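
/* Typical usage (illustrative): a gdb stub might install a 4-byte
   watchpoint with
       cpu_watchpoint_insert(env, addr, 4, BP_GDB, &wp);
   and later remove it with cpu_watchpoint_remove_by_ref(env, wp), or drop
   every GDB-injected watchpoint at once with
   cpu_watchpoint_remove_all(env, BP_GDB).  len must be a power of two
   (1, 2, 4 or 8) and addr must be aligned to it. */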
1380

    
1381
/* Add a breakpoint.  */
1382
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1383
                          CPUBreakpoint **breakpoint)
1384
{
1385
#if defined(TARGET_HAS_ICE)
1386
    CPUBreakpoint *bp;
1387

    
1388
    bp = qemu_malloc(sizeof(*bp));
1389
    if (!bp)
1390
        return -ENOMEM;
1391

    
1392
    bp->pc = pc;
1393
    bp->flags = flags;
1394

    
1395
    /* keep all GDB-injected breakpoints in front */
1396
    if (flags & BP_GDB)
1397
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1398
    else
1399
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1400

    
1401
    breakpoint_invalidate(env, pc);
1402

    
1403
    if (breakpoint)
1404
        *breakpoint = bp;
1405
    return 0;
1406
#else
1407
    return -ENOSYS;
1408
#endif
1409
}
1410

    
1411
/* Remove a specific breakpoint.  */
1412
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1413
{
1414
#if defined(TARGET_HAS_ICE)
1415
    CPUBreakpoint *bp;
1416

    
1417
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1418
        if (bp->pc == pc && bp->flags == flags) {
1419
            cpu_breakpoint_remove_by_ref(env, bp);
1420
            return 0;
1421
        }
1422
    }
1423
    return -ENOENT;
1424
#else
1425
    return -ENOSYS;
1426
#endif
1427
}
1428

    
1429
/* Remove a specific breakpoint by reference.  */
1430
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1431
{
1432
#if defined(TARGET_HAS_ICE)
1433
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1434

    
1435
    breakpoint_invalidate(env, breakpoint->pc);
1436

    
1437
    qemu_free(breakpoint);
1438
#endif
1439
}
1440

    
1441
/* Remove all matching breakpoints. */
1442
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1443
{
1444
#if defined(TARGET_HAS_ICE)
1445
    CPUBreakpoint *bp, *next;
1446

    
1447
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1448
        if (bp->flags & mask)
1449
            cpu_breakpoint_remove_by_ref(env, bp);
1450
    }
1451
#endif
1452
}
1453

    
1454
/* enable or disable single-step mode. EXCP_DEBUG is returned by the
1455
   CPU loop after each instruction */
1456
void cpu_single_step(CPUState *env, int enabled)
1457
{
1458
#if defined(TARGET_HAS_ICE)
1459
    if (env->singlestep_enabled != enabled) {
1460
        env->singlestep_enabled = enabled;
1461
        /* must flush all the translated code to avoid inconsistencies */
1462
        /* XXX: only flush what is necessary */
1463
        tb_flush(env);
1464
    }
1465
#endif
1466
}
1467

    
1468
/* enable or disable low-level logging */
1469
void cpu_set_log(int log_flags)
1470
{
1471
    loglevel = log_flags;
1472
    if (loglevel && !logfile) {
1473
        logfile = fopen(logfilename, log_append ? "a" : "w");
1474
        if (!logfile) {
1475
            perror(logfilename);
1476
            _exit(1);
1477
        }
1478
#if !defined(CONFIG_SOFTMMU)
1479
        /* must avoid glibc's use of mmap() by setting a buffer "by hand" */
1480
        {
1481
            static char logfile_buf[4096];
1482
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1483
        }
1484
#else
1485
        setvbuf(logfile, NULL, _IOLBF, 0);
1486
#endif
1487
        log_append = 1;
1488
    }
1489
    if (!loglevel && logfile) {
1490
        fclose(logfile);
1491
        logfile = NULL;
1492
    }
1493
}
1494

    
1495
void cpu_set_log_filename(const char *filename)
1496
{
1497
    logfilename = strdup(filename);
1498
    if (logfile) {
1499
        fclose(logfile);
1500
        logfile = NULL;
1501
    }
1502
    cpu_set_log(loglevel);
1503
}
1504

    
1505
/* mask must never be zero, except for A20 change call */
1506
void cpu_interrupt(CPUState *env, int mask)
1507
{
1508
#if !defined(USE_NPTL)
1509
    TranslationBlock *tb;
1510
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1511
#endif
1512
    int old_mask;
1513

    
1514
    old_mask = env->interrupt_request;
1515
    /* FIXME: This is probably not thread-safe.  A different thread could
1516
       be in the middle of a read-modify-write operation.  */
1517
    env->interrupt_request |= mask;
1518
#if defined(USE_NPTL)
1519
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1520
       problem and hope the cpu will stop of its own accord.  For userspace
1521
       emulation this often isn't actually as bad as it sounds.  Often
1522
       signals are used primarily to interrupt blocking syscalls.  */
1523
#else
1524
    if (use_icount) {
1525
        env->icount_decr.u16.high = 0xffff;
1526
#ifndef CONFIG_USER_ONLY
1527
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1528
           an async event happened and we need to process it.  */
1529
        if (!can_do_io(env)
1530
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1531
            cpu_abort(env, "Raised interrupt while not in I/O function");
1532
        }
1533
#endif
1534
    } else {
1535
        tb = env->current_tb;
1536
        /* if the cpu is currently executing code, we must unlink it and
1537
           all the potentially executing TB */
1538
        if (tb && !testandset(&interrupt_lock)) {
1539
            env->current_tb = NULL;
1540
            tb_reset_jump_recursive(tb);
1541
            resetlock(&interrupt_lock);
1542
        }
1543
    }
1544
#endif
1545
}
1546

    
1547
void cpu_reset_interrupt(CPUState *env, int mask)
1548
{
1549
    env->interrupt_request &= ~mask;
1550
}
1551

    
1552
const CPULogItem cpu_log_items[] = {
1553
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1554
      "show generated host assembly code for each compiled TB" },
1555
    { CPU_LOG_TB_IN_ASM, "in_asm",
1556
      "show target assembly code for each compiled TB" },
1557
    { CPU_LOG_TB_OP, "op",
1558
      "show micro ops for each compiled TB" },
1559
    { CPU_LOG_TB_OP_OPT, "op_opt",
1560
      "show micro ops "
1561
#ifdef TARGET_I386
1562
      "before eflags optimization and "
1563
#endif
1564
      "after liveness analysis" },
1565
    { CPU_LOG_INT, "int",
1566
      "show interrupts/exceptions in short format" },
1567
    { CPU_LOG_EXEC, "exec",
1568
      "show trace before each executed TB (lots of logs)" },
1569
    { CPU_LOG_TB_CPU, "cpu",
1570
      "show CPU state before block translation" },
1571
#ifdef TARGET_I386
1572
    { CPU_LOG_PCALL, "pcall",
1573
      "show protected mode far calls/returns/exceptions" },
1574
#endif
1575
#ifdef DEBUG_IOPORT
1576
    { CPU_LOG_IOPORT, "ioport",
1577
      "show all i/o ports accesses" },
1578
#endif
1579
    { 0, NULL, NULL },
1580
};
1581

    
1582
static int cmp1(const char *s1, int n, const char *s2)
1583
{
1584
    if (strlen(s2) != n)
1585
        return 0;
1586
    return memcmp(s1, s2, n) == 0;
1587
}
1588

    
1589
/* takes a comma-separated list of log masks. Returns 0 on error. */
1590
int cpu_str_to_log_mask(const char *str)
1591
{
1592
    const CPULogItem *item;
1593
    int mask;
1594
    const char *p, *p1;
1595

    
1596
    p = str;
1597
    mask = 0;
1598
    for(;;) {
1599
        p1 = strchr(p, ',');
1600
        if (!p1)
1601
            p1 = p + strlen(p);
1602
        if(cmp1(p,p1-p,"all")) {
1603
            for(item = cpu_log_items; item->mask != 0; item++) {
1604
                mask |= item->mask;
1605
            }
1606
        } else {
1607
            for(item = cpu_log_items; item->mask != 0; item++) {
1608
                if (cmp1(p, p1 - p, item->name))
1609
                    goto found;
1610
            }
1611
            return 0;
1612
        }
1613
    found:
1614
        mask |= item->mask;
1615
        if (*p1 != ',')
1616
            break;
1617
        p = p1 + 1;
1618
    }
1619
    return mask;
1620
}
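
/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, cpu_str_to_log_mask("all") selects
   every entry of cpu_log_items, and an unknown name makes the whole call
   return 0. */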
1621

    
1622
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
1667

    
1668
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1669
{
1670
    unsigned int i;
1671

    
1672
    /* Discard jump cache entries for any tb which might potentially
1673
       overlap the flushed page.  */
1674
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1675
    memset (&env->tb_jmp_cache[i], 0, 
1676
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1677

    
1678
    i = tb_jmp_cache_hash_page(addr);
1679
    memset (&env->tb_jmp_cache[i], 0, 
1680
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1681
}
1682

    
1683
/* NOTE: if flush_global is true, also flush global entries (not
1684
   implemented yet) */
1685
void tlb_flush(CPUState *env, int flush_global)
1686
{
1687
    int i;
1688

    
1689
#if defined(DEBUG_TLB)
1690
    printf("tlb_flush:\n");
1691
#endif
1692
    /* must reset current TB so that interrupts cannot modify the
1693
       links while we are modifying them */
1694
    env->current_tb = NULL;
1695

    
1696
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1697
        env->tlb_table[0][i].addr_read = -1;
1698
        env->tlb_table[0][i].addr_write = -1;
1699
        env->tlb_table[0][i].addr_code = -1;
1700
        env->tlb_table[1][i].addr_read = -1;
1701
        env->tlb_table[1][i].addr_write = -1;
1702
        env->tlb_table[1][i].addr_code = -1;
1703
#if (NB_MMU_MODES >= 3)
1704
        env->tlb_table[2][i].addr_read = -1;
1705
        env->tlb_table[2][i].addr_write = -1;
1706
        env->tlb_table[2][i].addr_code = -1;
1707
#if (NB_MMU_MODES == 4)
1708
        env->tlb_table[3][i].addr_read = -1;
1709
        env->tlb_table[3][i].addr_write = -1;
1710
        env->tlb_table[3][i].addr_code = -1;
1711
#endif
1712
#endif
1713
    }
1714

    
1715
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1716

    
1717
#ifdef USE_KQEMU
1718
    if (env->kqemu_enabled) {
1719
        kqemu_flush(env, flush_global);
1720
    }
1721
#endif
1722
    tlb_flush_count++;
1723
}
1724

    
1725
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1726
{
1727
    if (addr == (tlb_entry->addr_read &
1728
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1729
        addr == (tlb_entry->addr_write &
1730
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1731
        addr == (tlb_entry->addr_code &
1732
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1733
        tlb_entry->addr_read = -1;
1734
        tlb_entry->addr_write = -1;
1735
        tlb_entry->addr_code = -1;
1736
    }
1737
}
1738

    
1739
void tlb_flush_page(CPUState *env, target_ulong addr)
1740
{
1741
    int i;
1742

    
1743
#if defined(DEBUG_TLB)
1744
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1745
#endif
1746
    /* must reset current TB so that interrupts cannot modify the
1747
       links while we are modifying them */
1748
    env->current_tb = NULL;
1749

    
1750
    addr &= TARGET_PAGE_MASK;
1751
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1752
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1753
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1754
#if (NB_MMU_MODES >= 3)
1755
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1756
#if (NB_MMU_MODES == 4)
1757
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1758
#endif
1759
#endif
1760

    
1761
    tlb_flush_jmp_cache(env, addr);
1762

    
1763
#ifdef USE_KQEMU
1764
    if (env->kqemu_enabled) {
1765
        kqemu_flush_page(env, addr);
1766
    }
1767
#endif
1768
}
1769

    
1770
/* update the TLBs so that writes to code in the virtual page 'addr'
1771
   can be detected */
1772
static void tlb_protect_code(ram_addr_t ram_addr)
1773
{
1774
    cpu_physical_memory_reset_dirty(ram_addr,
1775
                                    ram_addr + TARGET_PAGE_SIZE,
1776
                                    CODE_DIRTY_FLAG);
1777
}
1778

    
1779
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1780
   tested for self modifying code */
1781
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1782
                                    target_ulong vaddr)
1783
{
1784
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1785
}
1786

    
1787
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1788
                                         unsigned long start, unsigned long length)
1789
{
1790
    unsigned long addr;
1791
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1792
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1793
        if ((addr - start) < length) {
1794
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1795
        }
1796
    }
1797
}
1798

    
1799
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1800
                                     int dirty_flags)
1801
{
1802
    CPUState *env;
1803
    unsigned long length, start1;
1804
    int i, mask, len;
1805
    uint8_t *p;
1806

    
1807
    start &= TARGET_PAGE_MASK;
1808
    end = TARGET_PAGE_ALIGN(end);
1809

    
1810
    length = end - start;
1811
    if (length == 0)
1812
        return;
1813
    len = length >> TARGET_PAGE_BITS;
1814
#ifdef USE_KQEMU
1815
    /* XXX: should not depend on cpu context */
1816
    env = first_cpu;
1817
    if (env->kqemu_enabled) {
1818
        ram_addr_t addr;
1819
        addr = start;
1820
        for(i = 0; i < len; i++) {
1821
            kqemu_set_notdirty(env, addr);
1822
            addr += TARGET_PAGE_SIZE;
1823
        }
1824
    }
1825
#endif
1826
    mask = ~dirty_flags;
1827
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1828
    for(i = 0; i < len; i++)
1829
        p[i] &= mask;
1830

    
1831
    /* we modify the TLB cache so that the dirty bit will be set again
1832
       when accessing the range */
1833
    start1 = start + (unsigned long)phys_ram_base;
1834
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1835
        for(i = 0; i < CPU_TLB_SIZE; i++)
1836
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1837
        for(i = 0; i < CPU_TLB_SIZE; i++)
1838
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1839
#if (NB_MMU_MODES >= 3)
1840
        for(i = 0; i < CPU_TLB_SIZE; i++)
1841
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1842
#if (NB_MMU_MODES == 4)
1843
        for(i = 0; i < CPU_TLB_SIZE; i++)
1844
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1845
#endif
1846
#endif
1847
    }
1848
}
1849

    
1850
int cpu_physical_memory_set_dirty_tracking(int enable)
1851
{
1852
    in_migration = enable;
1853
    return 0;
1854
}
1855

    
1856
int cpu_physical_memory_get_dirty_tracking(void)
1857
{
1858
    return in_migration;
1859
}
1860

    
1861
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1862
{
1863
    if (kvm_enabled())
1864
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1865
}
1866

    
1867
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1868
{
1869
    ram_addr_t ram_addr;
1870

    
1871
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1872
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1873
            tlb_entry->addend - (unsigned long)phys_ram_base;
1874
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1875
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1876
        }
1877
    }
1878
}
1879

    
1880
/* update the TLB according to the current state of the dirty bits */
1881
void cpu_tlb_update_dirty(CPUState *env)
1882
{
1883
    int i;
1884
    for(i = 0; i < CPU_TLB_SIZE; i++)
1885
        tlb_update_dirty(&env->tlb_table[0][i]);
1886
    for(i = 0; i < CPU_TLB_SIZE; i++)
1887
        tlb_update_dirty(&env->tlb_table[1][i]);
1888
#if (NB_MMU_MODES >= 3)
1889
    for(i = 0; i < CPU_TLB_SIZE; i++)
1890
        tlb_update_dirty(&env->tlb_table[2][i]);
1891
#if (NB_MMU_MODES == 4)
1892
    for(i = 0; i < CPU_TLB_SIZE; i++)
1893
        tlb_update_dirty(&env->tlb_table[3][i]);
1894
#endif
1895
#endif
1896
}
1897

    
1898
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1899
{
1900
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1901
        tlb_entry->addr_write = vaddr;
1902
}
1903

    
1904
/* update the TLB corresponding to virtual page vaddr
1905
   so that it is no longer dirty */
1906
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1907
{
1908
    int i;
1909

    
1910
    vaddr &= TARGET_PAGE_MASK;
1911
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1912
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1913
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1914
#if (NB_MMU_MODES >= 3)
1915
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1916
#if (NB_MMU_MODES == 4)
1917
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1918
#endif
1919
#endif
1920
}
1921

    
1922
/* add a new TLB entry. At most one entry for a given virtual address
1923
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1924
   (can only happen in non SOFTMMU mode for I/O pages or pages
1925
   conflicting with the host address space). */
1926
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1927
                      target_phys_addr_t paddr, int prot,
1928
                      int mmu_idx, int is_softmmu)
1929
{
1930
    PhysPageDesc *p;
1931
    unsigned long pd;
1932
    unsigned int index;
1933
    target_ulong address;
1934
    target_ulong code_address;
1935
    target_phys_addr_t addend;
1936
    int ret;
1937
    CPUTLBEntry *te;
1938
    CPUWatchpoint *wp;
1939
    target_phys_addr_t iotlb;
1940

    
1941
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1942
    if (!p) {
1943
        pd = IO_MEM_UNASSIGNED;
1944
    } else {
1945
        pd = p->phys_offset;
1946
    }
1947
#if defined(DEBUG_TLB)
1948
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1949
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1950
#endif
1951

    
1952
    ret = 0;
1953
    address = vaddr;
1954
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1955
        /* IO memory case (romd handled later) */
1956
        address |= TLB_MMIO;
1957
    }
1958
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1959
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1960
        /* Normal RAM.  */
1961
        iotlb = pd & TARGET_PAGE_MASK;
1962
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1963
            iotlb |= IO_MEM_NOTDIRTY;
1964
        else
1965
            iotlb |= IO_MEM_ROM;
1966
    } else {
1967
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
1973
        iotlb = (pd & ~TARGET_PAGE_MASK);
1974
        if (p) {
1975
            iotlb += p->region_offset;
1976
        } else {
1977
            iotlb += paddr;
1978
        }
1979
    }
1980

    
1981
    code_address = address;
1982
    /* Make accesses to pages with watchpoints go via the
1983
       watchpoint trap routines.  */
1984
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1985
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1986
            iotlb = io_mem_watch + paddr;
1987
            /* TODO: The memory case can be optimized by not trapping
1988
               reads of pages with a write breakpoint.  */
1989
            address |= TLB_MMIO;
1990
        }
1991
    }
1992

    
1993
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1994
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
1995
    te = &env->tlb_table[mmu_idx][index];
1996
    te->addend = addend - vaddr;
1997
    if (prot & PAGE_READ) {
1998
        te->addr_read = address;
1999
    } else {
2000
        te->addr_read = -1;
2001
    }
2002

    
2003
    if (prot & PAGE_EXEC) {
2004
        te->addr_code = code_address;
2005
    } else {
2006
        te->addr_code = -1;
2007
    }
2008
    if (prot & PAGE_WRITE) {
2009
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2010
            (pd & IO_MEM_ROMD)) {
2011
            /* Write access calls the I/O callback.  */
2012
            te->addr_write = address | TLB_MMIO;
2013
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2014
                   !cpu_physical_memory_is_dirty(pd)) {
2015
            te->addr_write = address | TLB_NOTDIRTY;
2016
        } else {
2017
            te->addr_write = address;
2018
        }
2019
    } else {
2020
        te->addr_write = -1;
2021
    }
2022
    return ret;
2023
}
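
/* Note: for RAM pages, the 'addend' stored in the entry above is assumed
   to be what the softmmu fast path uses to turn a guest virtual address
   into a host pointer, roughly:
       host_ptr = (uint8_t *)(unsigned long)(vaddr + te->addend);
   whereas entries tagged TLB_MMIO or TLB_NOTDIRTY force the access back
   through the slow path (the io_mem handlers and the notdirty write
   handlers defined later in this file). */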
2024

    
2025
#else
2026

    
2027
void tlb_flush(CPUState *env, int flush_global)
2028
{
2029
}
2030

    
2031
void tlb_flush_page(CPUState *env, target_ulong addr)
2032
{
2033
}
2034

    
2035
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2036
                      target_phys_addr_t paddr, int prot,
2037
                      int mmu_idx, int is_softmmu)
2038
{
2039
    return 0;
2040
}
2041

    
2042
/* dump memory mappings */
2043
void page_dump(FILE *f)
2044
{
2045
    unsigned long start, end;
2046
    int i, j, prot, prot1;
2047
    PageDesc *p;
2048

    
2049
    fprintf(f, "%-8s %-8s %-8s %s\n",
2050
            "start", "end", "size", "prot");
2051
    start = -1;
2052
    end = -1;
2053
    prot = 0;
2054
    for(i = 0; i <= L1_SIZE; i++) {
2055
        if (i < L1_SIZE)
2056
            p = l1_map[i];
2057
        else
2058
            p = NULL;
2059
        for(j = 0;j < L2_SIZE; j++) {
2060
            if (!p)
2061
                prot1 = 0;
2062
            else
2063
                prot1 = p[j].flags;
2064
            if (prot1 != prot) {
2065
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2066
                if (start != -1) {
2067
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2068
                            start, end, end - start,
2069
                            prot & PAGE_READ ? 'r' : '-',
2070
                            prot & PAGE_WRITE ? 'w' : '-',
2071
                            prot & PAGE_EXEC ? 'x' : '-');
2072
                }
2073
                if (prot1 != 0)
2074
                    start = end;
2075
                else
2076
                    start = -1;
2077
                prot = prot1;
2078
            }
2079
            if (!p)
2080
                break;
2081
        }
2082
    }
2083
}
2084

    
2085
int page_get_flags(target_ulong address)
2086
{
2087
    PageDesc *p;
2088

    
2089
    p = page_find(address >> TARGET_PAGE_BITS);
2090
    if (!p)
2091
        return 0;
2092
    return p->flags;
2093
}
2094

    
2095
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
2098
void page_set_flags(target_ulong start, target_ulong end, int flags)
2099
{
2100
    PageDesc *p;
2101
    target_ulong addr;
2102

    
2103
    /* mmap_lock should already be held.  */
2104
    start = start & TARGET_PAGE_MASK;
2105
    end = TARGET_PAGE_ALIGN(end);
2106
    if (flags & PAGE_WRITE)
2107
        flags |= PAGE_WRITE_ORG;
2108
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2109
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2110
        /* We may be called for host regions that are outside guest
2111
           address space.  */
2112
        if (!p)
2113
            return;
2114
        /* if the write protection is set, then we invalidate the code
2115
           inside */
2116
        if (!(p->flags & PAGE_WRITE) &&
2117
            (flags & PAGE_WRITE) &&
2118
            p->first_tb) {
2119
            tb_invalidate_phys_page(addr, 0, NULL);
2120
        }
2121
        p->flags = flags;
2122
    }
2123
}
2124

    
2125
int page_check_range(target_ulong start, target_ulong len, int flags)
2126
{
2127
    PageDesc *p;
2128
    target_ulong end;
2129
    target_ulong addr;
2130

    
2131
    if (start + len < start)
2132
        /* we've wrapped around */
2133
        return -1;
2134

    
2135
    end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2136
    start = start & TARGET_PAGE_MASK;
2137

    
2138
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2139
        p = page_find(addr >> TARGET_PAGE_BITS);
2140
        if( !p )
2141
            return -1;
2142
        if( !(p->flags & PAGE_VALID) )
2143
            return -1;
2144

    
2145
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2146
            return -1;
2147
        if (flags & PAGE_WRITE) {
2148
            if (!(p->flags & PAGE_WRITE_ORG))
2149
                return -1;
2150
            /* unprotect the page if it was put read-only because it
2151
               contains translated code */
2152
            if (!(p->flags & PAGE_WRITE)) {
2153
                if (!page_unprotect(addr, 0, NULL))
2154
                    return -1;
2155
            }
2156
            return 0;
2157
        }
2158
    }
2159
    return 0;
2160
}
2161

    
2162
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2164
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2165
{
2166
    unsigned int page_index, prot, pindex;
2167
    PageDesc *p, *p1;
2168
    target_ulong host_start, host_end, addr;
2169

    
2170
    /* Technically this isn't safe inside a signal handler.  However we
2171
       know this only ever happens in a synchronous SEGV handler, so in
2172
       practice it seems to be ok.  */
2173
    mmap_lock();
2174

    
2175
    host_start = address & qemu_host_page_mask;
2176
    page_index = host_start >> TARGET_PAGE_BITS;
2177
    p1 = page_find(page_index);
2178
    if (!p1) {
2179
        mmap_unlock();
2180
        return 0;
2181
    }
2182
    host_end = host_start + qemu_host_page_size;
2183
    p = p1;
2184
    prot = 0;
2185
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2186
        prot |= p->flags;
2187
        p++;
2188
    }
2189
    /* if the page was really writable, then we change its
2190
       protection back to writable */
2191
    if (prot & PAGE_WRITE_ORG) {
2192
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2193
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2194
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2195
                     (prot & PAGE_BITS) | PAGE_WRITE);
2196
            p1[pindex].flags |= PAGE_WRITE;
2197
            /* and since the content will be modified, we must invalidate
2198
               the corresponding translated code. */
2199
            tb_invalidate_phys_page(address, pc, puc);
2200
#ifdef DEBUG_TB_CHECK
2201
            tb_invalidate_check(address);
2202
#endif
2203
            mmap_unlock();
2204
            return 1;
2205
        }
2206
    }
2207
    mmap_unlock();
2208
    return 0;
2209
}
2210

    
2211
static inline void tlb_set_dirty(CPUState *env,
2212
                                 unsigned long addr, target_ulong vaddr)
2213
{
2214
}
2215
#endif /* defined(CONFIG_USER_ONLY) */
2216

    
2217
#if !defined(CONFIG_USER_ONLY)
2218

    
2219
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2220
                             ram_addr_t memory, ram_addr_t region_offset);
2221
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2222
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2223
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2224
                      need_subpage)                                     \
2225
    do {                                                                \
2226
        if (addr > start_addr)                                          \
2227
            start_addr2 = 0;                                            \
2228
        else {                                                          \
2229
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2230
            if (start_addr2 > 0)                                        \
2231
                need_subpage = 1;                                       \
2232
        }                                                               \
2233
                                                                        \
2234
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2235
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2236
        else {                                                          \
2237
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2238
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2239
                need_subpage = 1;                                       \
2240
        }                                                               \
2241
    } while (0)
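
/* Worked example (assuming 4 KB target pages): for a registration with
   start_addr = 0x1200 and orig_size = 0x200, the page at addr = 0x1000
   gets start_addr2 = 0x200 and end_addr2 = 0x3ff.  Neither bound covers
   the whole page, so need_subpage is set and only that slice of the page
   is routed to the new handlers. */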
2242

    
2243
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
2250
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2251
                                         ram_addr_t size,
2252
                                         ram_addr_t phys_offset,
2253
                                         ram_addr_t region_offset)
2254
{
2255
    target_phys_addr_t addr, end_addr;
2256
    PhysPageDesc *p;
2257
    CPUState *env;
2258
    ram_addr_t orig_size = size;
2259
    void *subpage;
2260

    
2261
#ifdef USE_KQEMU
2262
    /* XXX: should not depend on cpu context */
2263
    env = first_cpu;
2264
    if (env->kqemu_enabled) {
2265
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2266
    }
2267
#endif
2268
    if (kvm_enabled())
2269
        kvm_set_phys_mem(start_addr, size, phys_offset);
2270

    
2271
    region_offset &= TARGET_PAGE_MASK;
2272
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2273
    end_addr = start_addr + (target_phys_addr_t)size;
2274
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2275
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2276
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2277
            ram_addr_t orig_memory = p->phys_offset;
2278
            target_phys_addr_t start_addr2, end_addr2;
2279
            int need_subpage = 0;
2280

    
2281
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2282
                          need_subpage);
2283
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2284
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2285
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2286
                                           &p->phys_offset, orig_memory,
2287
                                           p->region_offset);
2288
                } else {
2289
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2290
                                            >> IO_MEM_SHIFT];
2291
                }
2292
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2293
                                 region_offset);
2294
                p->region_offset = 0;
2295
            } else {
2296
                p->phys_offset = phys_offset;
2297
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2298
                    (phys_offset & IO_MEM_ROMD))
2299
                    phys_offset += TARGET_PAGE_SIZE;
2300
            }
2301
        } else {
2302
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2303
            p->phys_offset = phys_offset;
2304
            p->region_offset = region_offset;
2305
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2306
                (phys_offset & IO_MEM_ROMD)) {
2307
                phys_offset += TARGET_PAGE_SIZE;
2308
            } else {
2309
                target_phys_addr_t start_addr2, end_addr2;
2310
                int need_subpage = 0;
2311

    
2312
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2313
                              end_addr2, need_subpage);
2314

    
2315
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2316
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2317
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2318
                                           0);
2319
                    subpage_register(subpage, start_addr2, end_addr2,
2320
                                     phys_offset, region_offset);
2321
                    p->region_offset = 0;
2322
                }
2323
            }
2324
        }
2325
        region_offset += TARGET_PAGE_SIZE;
2326
    }
2327

    
2328
    /* since each CPU stores ram addresses in its TLB cache, we must
2329
       reset the modified entries */
2330
    /* XXX: slow ! */
2331
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2332
        tlb_flush(env, 1);
2333
    }
2334
}
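
/* Rough usage sketch (addresses, sizes and the my_* identifiers are only
   placeholders): a board model would typically map RAM and a device like
   this:
       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
       int io = cpu_register_io_memory(0, my_read, my_write, my_state);
       cpu_register_physical_memory(0x10000000, 0x1000, io);
   cpu_register_physical_memory() is assumed to be the region_offset == 0
   wrapper around the function above. */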
2335

    
2336
/* XXX: temporary until new memory mapping API */
2337
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2338
{
2339
    PhysPageDesc *p;
2340

    
2341
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2342
    if (!p)
2343
        return IO_MEM_UNASSIGNED;
2344
    return p->phys_offset;
2345
}
2346

    
2347
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2348
{
2349
    if (kvm_enabled())
2350
        kvm_coalesce_mmio_region(addr, size);
2351
}
2352

    
2353
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2354
{
2355
    if (kvm_enabled())
2356
        kvm_uncoalesce_mmio_region(addr, size);
2357
}
2358

    
2359
/* XXX: better than nothing */
2360
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2361
{
2362
    ram_addr_t addr;
2363
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2364
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2365
                (uint64_t)size, (uint64_t)phys_ram_size);
2366
        abort();
2367
    }
2368
    addr = phys_ram_alloc_offset;
2369
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2370
    return addr;
2371
}
2372

    
2373
void qemu_ram_free(ram_addr_t addr)
2374
{
2375
}
2376

    
2377
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2378
{
2379
#ifdef DEBUG_UNASSIGNED
2380
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2381
#endif
2382
#if defined(TARGET_SPARC)
2383
    do_unassigned_access(addr, 0, 0, 0, 1);
2384
#endif
2385
    return 0;
2386
}
2387

    
2388
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2389
{
2390
#ifdef DEBUG_UNASSIGNED
2391
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2392
#endif
2393
#if defined(TARGET_SPARC)
2394
    do_unassigned_access(addr, 0, 0, 0, 2);
2395
#endif
2396
    return 0;
2397
}
2398

    
2399
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2400
{
2401
#ifdef DEBUG_UNASSIGNED
2402
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2403
#endif
2404
#if defined(TARGET_SPARC)
2405
    do_unassigned_access(addr, 0, 0, 0, 4);
2406
#endif
2407
    return 0;
2408
}
2409

    
2410
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2411
{
2412
#ifdef DEBUG_UNASSIGNED
2413
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2414
#endif
2415
#if defined(TARGET_SPARC)
2416
    do_unassigned_access(addr, 1, 0, 0, 1);
2417
#endif
2418
}
2419

    
2420
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2421
{
2422
#ifdef DEBUG_UNASSIGNED
2423
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2424
#endif
2425
#if defined(TARGET_SPARC)
2426
    do_unassigned_access(addr, 1, 0, 0, 2);
2427
#endif
2428
}
2429

    
2430
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2431
{
2432
#ifdef DEBUG_UNASSIGNED
2433
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2434
#endif
2435
#if defined(TARGET_SPARC)
2436
    do_unassigned_access(addr, 1, 0, 0, 4);
2437
#endif
2438
}
2439

    
2440
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2441
    unassigned_mem_readb,
2442
    unassigned_mem_readw,
2443
    unassigned_mem_readl,
2444
};
2445

    
2446
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2447
    unassigned_mem_writeb,
2448
    unassigned_mem_writew,
2449
    unassigned_mem_writel,
2450
};
2451

    
2452
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2453
                                uint32_t val)
2454
{
2455
    int dirty_flags;
2456
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2457
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2458
#if !defined(CONFIG_USER_ONLY)
2459
        tb_invalidate_phys_page_fast(ram_addr, 1);
2460
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2461
#endif
2462
    }
2463
    stb_p(phys_ram_base + ram_addr, val);
2464
#ifdef USE_KQEMU
2465
    if (cpu_single_env->kqemu_enabled &&
2466
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2467
        kqemu_modify_page(cpu_single_env, ram_addr);
2468
#endif
2469
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2470
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2471
    /* we remove the notdirty callback only if the code has been
2472
       flushed */
2473
    if (dirty_flags == 0xff)
2474
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2475
}
2476

    
2477
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2478
                                uint32_t val)
2479
{
2480
    int dirty_flags;
2481
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2482
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2483
#if !defined(CONFIG_USER_ONLY)
2484
        tb_invalidate_phys_page_fast(ram_addr, 2);
2485
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2486
#endif
2487
    }
2488
    stw_p(phys_ram_base + ram_addr, val);
2489
#ifdef USE_KQEMU
2490
    if (cpu_single_env->kqemu_enabled &&
2491
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2492
        kqemu_modify_page(cpu_single_env, ram_addr);
2493
#endif
2494
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2495
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2496
    /* we remove the notdirty callback only if the code has been
2497
       flushed */
2498
    if (dirty_flags == 0xff)
2499
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2500
}
2501

    
2502
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2503
                                uint32_t val)
2504
{
2505
    int dirty_flags;
2506
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2507
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2508
#if !defined(CONFIG_USER_ONLY)
2509
        tb_invalidate_phys_page_fast(ram_addr, 4);
2510
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2511
#endif
2512
    }
2513
    stl_p(phys_ram_base + ram_addr, val);
2514
#ifdef USE_KQEMU
2515
    if (cpu_single_env->kqemu_enabled &&
2516
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2517
        kqemu_modify_page(cpu_single_env, ram_addr);
2518
#endif
2519
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2520
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2521
    /* we remove the notdirty callback only if the code has been
2522
       flushed */
2523
    if (dirty_flags == 0xff)
2524
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2525
}
2526

    
2527
static CPUReadMemoryFunc *error_mem_read[3] = {
2528
    NULL, /* never used */
2529
    NULL, /* never used */
2530
    NULL, /* never used */
2531
};
2532

    
2533
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2534
    notdirty_mem_writeb,
2535
    notdirty_mem_writew,
2536
    notdirty_mem_writel,
2537
};
2538

    
2539
/* Generate a debug exception if a watchpoint has been hit.  */
2540
static void check_watchpoint(int offset, int len_mask, int flags)
2541
{
2542
    CPUState *env = cpu_single_env;
2543
    target_ulong pc, cs_base;
2544
    TranslationBlock *tb;
2545
    target_ulong vaddr;
2546
    CPUWatchpoint *wp;
2547
    int cpu_flags;
2548

    
2549
    if (env->watchpoint_hit) {
2550
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2553
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2554
        return;
2555
    }
2556
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2557
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2558
        if ((vaddr == (wp->vaddr & len_mask) ||
2559
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2560
            wp->flags |= BP_WATCHPOINT_HIT;
2561
            if (!env->watchpoint_hit) {
2562
                env->watchpoint_hit = wp;
2563
                tb = tb_find_pc(env->mem_io_pc);
2564
                if (!tb) {
2565
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2566
                              "pc=%p", (void *)env->mem_io_pc);
2567
                }
2568
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2569
                tb_phys_invalidate(tb, -1);
2570
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2571
                    env->exception_index = EXCP_DEBUG;
2572
                } else {
2573
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2574
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2575
                }
2576
                cpu_resume_from_signal(env, NULL);
2577
            }
2578
        } else {
2579
            wp->flags &= ~BP_WATCHPOINT_HIT;
2580
        }
2581
    }
2582
}
2583

    
2584
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2585
   so these check for a hit then pass through to the normal out-of-line
2586
   phys routines.  */
2587
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2588
{
2589
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2590
    return ldub_phys(addr);
2591
}
2592

    
2593
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2594
{
2595
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2596
    return lduw_phys(addr);
2597
}
2598

    
2599
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2600
{
2601
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2602
    return ldl_phys(addr);
2603
}
2604

    
2605
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2606
                             uint32_t val)
2607
{
2608
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2609
    stb_phys(addr, val);
2610
}
2611

    
2612
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2613
                             uint32_t val)
2614
{
2615
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2616
    stw_phys(addr, val);
2617
}
2618

    
2619
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2620
                             uint32_t val)
2621
{
2622
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2623
    stl_phys(addr, val);
2624
}
2625

    
2626
static CPUReadMemoryFunc *watch_mem_read[3] = {
2627
    watch_mem_readb,
2628
    watch_mem_readw,
2629
    watch_mem_readl,
2630
};
2631

    
2632
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2633
    watch_mem_writeb,
2634
    watch_mem_writew,
2635
    watch_mem_writel,
2636
};
2637

    
2638
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2639
                                 unsigned int len)
2640
{
2641
    uint32_t ret;
2642
    unsigned int idx;
2643

    
2644
    idx = SUBPAGE_IDX(addr);
2645
#if defined(DEBUG_SUBPAGE)
2646
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2647
           mmio, len, addr, idx);
2648
#endif
2649
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2650
                                       addr + mmio->region_offset[idx][0][len]);
2651

    
2652
    return ret;
2653
}
2654

    
2655
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2656
                              uint32_t value, unsigned int len)
2657
{
2658
    unsigned int idx;
2659

    
2660
    idx = SUBPAGE_IDX(addr);
2661
#if defined(DEBUG_SUBPAGE)
2662
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2663
           mmio, len, addr, idx, value);
2664
#endif
2665
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2666
                                  addr + mmio->region_offset[idx][1][len],
2667
                                  value);
2668
}
2669

    
2670
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2671
{
2672
#if defined(DEBUG_SUBPAGE)
2673
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2674
#endif
2675

    
2676
    return subpage_readlen(opaque, addr, 0);
2677
}
2678

    
2679
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2680
                            uint32_t value)
2681
{
2682
#if defined(DEBUG_SUBPAGE)
2683
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2684
#endif
2685
    subpage_writelen(opaque, addr, value, 0);
2686
}
2687

    
2688
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2689
{
2690
#if defined(DEBUG_SUBPAGE)
2691
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2692
#endif
2693

    
2694
    return subpage_readlen(opaque, addr, 1);
2695
}
2696

    
2697
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2698
                            uint32_t value)
2699
{
2700
#if defined(DEBUG_SUBPAGE)
2701
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2702
#endif
2703
    subpage_writelen(opaque, addr, value, 1);
2704
}
2705

    
2706
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2707
{
2708
#if defined(DEBUG_SUBPAGE)
2709
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2710
#endif
2711

    
2712
    return subpage_readlen(opaque, addr, 2);
2713
}
2714

    
2715
static void subpage_writel (void *opaque,
2716
                         target_phys_addr_t addr, uint32_t value)
2717
{
2718
#if defined(DEBUG_SUBPAGE)
2719
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2720
#endif
2721
    subpage_writelen(opaque, addr, value, 2);
2722
}
2723

    
2724
static CPUReadMemoryFunc *subpage_read[] = {
2725
    &subpage_readb,
2726
    &subpage_readw,
2727
    &subpage_readl,
2728
};
2729

    
2730
static CPUWriteMemoryFunc *subpage_write[] = {
2731
    &subpage_writeb,
2732
    &subpage_writew,
2733
    &subpage_writel,
2734
};
2735

    
2736
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2737
                             ram_addr_t memory, ram_addr_t region_offset)
2738
{
2739
    int idx, eidx;
2740
    unsigned int i;
2741

    
2742
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2743
        return -1;
2744
    idx = SUBPAGE_IDX(start);
2745
    eidx = SUBPAGE_IDX(end);
2746
#if defined(DEBUG_SUBPAGE)
2747
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2748
           mmio, start, end, idx, eidx, memory);
2749
#endif
2750
    memory >>= IO_MEM_SHIFT;
2751
    for (; idx <= eidx; idx++) {
2752
        for (i = 0; i < 4; i++) {
2753
            if (io_mem_read[memory][i]) {
2754
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2755
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2756
                mmio->region_offset[idx][0][i] = region_offset;
2757
            }
2758
            if (io_mem_write[memory][i]) {
2759
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2760
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2761
                mmio->region_offset[idx][1][i] = region_offset;
2762
            }
2763
        }
2764
    }
2765

    
2766
    return 0;
2767
}
2768

    
2769
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2770
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2771
{
2772
    subpage_t *mmio;
2773
    int subpage_memory;
2774

    
2775
    mmio = qemu_mallocz(sizeof(subpage_t));
2776
    if (mmio != NULL) {
2777
        mmio->base = base;
2778
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2779
#if defined(DEBUG_SUBPAGE)
2780
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2781
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2782
#endif
2783
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2784
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2785
                         region_offset);
2786
    }
2787

    
2788
    return mmio;
2789
}
2790

    
2791
static void io_mem_init(void)
2792
{
2793
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2794
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2795
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2796
    io_mem_nb = 5;
2797

    
2798
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2799
                                          watch_mem_write, NULL);
2800
    /* alloc dirty bits array */
2801
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2802
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2803
}
2804

    
2805
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
2813
int cpu_register_io_memory(int io_index,
2814
                           CPUReadMemoryFunc **mem_read,
2815
                           CPUWriteMemoryFunc **mem_write,
2816
                           void *opaque)
2817
{
2818
    int i, subwidth = 0;
2819

    
2820
    if (io_index <= 0) {
2821
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2822
            return -1;
2823
        io_index = io_mem_nb++;
2824
    } else {
2825
        if (io_index >= IO_MEM_NB_ENTRIES)
2826
            return -1;
2827
    }
2828

    
2829
    for(i = 0;i < 3; i++) {
2830
        if (!mem_read[i] || !mem_write[i])
2831
            subwidth = IO_MEM_SUBWIDTH;
2832
        io_mem_read[io_index][i] = mem_read[i];
2833
        io_mem_write[io_index][i] = mem_write[i];
2834
    }
2835
    io_mem_opaque[io_index] = opaque;
2836
    return (io_index << IO_MEM_SHIFT) | subwidth;
2837
}
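
/* Example shape of the callback tables (the my_* names are placeholders):
   a device passes three read and three write handlers, indexed by access
   size (byte, word, dword):
       static CPUReadMemoryFunc *my_read[3] = {
           my_readb, my_readw, my_readl,
       };
       static CPUWriteMemoryFunc *my_write[3] = {
           my_writeb, my_writew, my_writel,
       };
       int io = cpu_register_io_memory(0, my_read, my_write, my_state);
   Leaving a slot NULL marks that width as unsupported, which is what
   makes the returned index carry the IO_MEM_SUBWIDTH tag above. */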
2838

    
2839
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2840
{
2841
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2842
}
2843

    
2844
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2845
{
2846
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2847
}
2848

    
2849
#endif /* !defined(CONFIG_USER_ONLY) */
2850

    
2851
/* physical memory access (slow version, mainly for debug) */
2852
#if defined(CONFIG_USER_ONLY)
2853
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2854
                            int len, int is_write)
2855
{
2856
    int l, flags;
2857
    target_ulong page;
2858
    void * p;
2859

    
2860
    while (len > 0) {
2861
        page = addr & TARGET_PAGE_MASK;
2862
        l = (page + TARGET_PAGE_SIZE) - addr;
2863
        if (l > len)
2864
            l = len;
2865
        flags = page_get_flags(page);
2866
        if (!(flags & PAGE_VALID))
2867
            return;
2868
        if (is_write) {
2869
            if (!(flags & PAGE_WRITE))
2870
                return;
2871
            /* XXX: this code should not depend on lock_user */
2872
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2873
                /* FIXME - should this return an error rather than just fail? */
2874
                return;
2875
            memcpy(p, buf, l);
2876
            unlock_user(p, addr, l);
2877
        } else {
2878
            if (!(flags & PAGE_READ))
2879
                return;
2880
            /* XXX: this code should not depend on lock_user */
2881
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2882
                /* FIXME - should this return an error rather than just fail? */
2883
                return;
2884
            memcpy(buf, p, l);
2885
            unlock_user(p, addr, 0);
2886
        }
2887
        len -= l;
2888
        buf += l;
2889
        addr += l;
2890
    }
2891
}
2892

    
2893
#else
2894
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2895
                            int len, int is_write)
2896
{
2897
    int l, io_index;
2898
    uint8_t *ptr;
2899
    uint32_t val;
2900
    target_phys_addr_t page;
2901
    unsigned long pd;
2902
    PhysPageDesc *p;
2903

    
2904
    while (len > 0) {
2905
        page = addr & TARGET_PAGE_MASK;
2906
        l = (page + TARGET_PAGE_SIZE) - addr;
2907
        if (l > len)
2908
            l = len;
2909
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2910
        if (!p) {
2911
            pd = IO_MEM_UNASSIGNED;
2912
        } else {
2913
            pd = p->phys_offset;
2914
        }
2915

    
2916
        if (is_write) {
2917
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2918
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2919
                if (p)
2920
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2921
                /* XXX: could force cpu_single_env to NULL to avoid
2922
                   potential bugs */
2923
                if (l >= 4 && ((addr & 3) == 0)) {
2924
                    /* 32 bit write access */
2925
                    val = ldl_p(buf);
2926
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2927
                    l = 4;
2928
                } else if (l >= 2 && ((addr & 1) == 0)) {
2929
                    /* 16 bit write access */
2930
                    val = lduw_p(buf);
2931
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2932
                    l = 2;
2933
                } else {
2934
                    /* 8 bit write access */
2935
                    val = ldub_p(buf);
2936
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2937
                    l = 1;
2938
                }
2939
            } else {
2940
                unsigned long addr1;
2941
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2942
                /* RAM case */
2943
                ptr = phys_ram_base + addr1;
2944
                memcpy(ptr, buf, l);
2945
                if (!cpu_physical_memory_is_dirty(addr1)) {
2946
                    /* invalidate code */
2947
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2948
                    /* set dirty bit */
2949
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2950
                        (0xff & ~CODE_DIRTY_FLAG);
2951
                }
2952
            }
2953
        } else {
2954
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2955
                !(pd & IO_MEM_ROMD)) {
2956
                /* I/O case */
2957
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2958
                if (p)
2959
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2960
                if (l >= 4 && ((addr & 3) == 0)) {
2961
                    /* 32 bit read access */
2962
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2963
                    stl_p(buf, val);
2964
                    l = 4;
2965
                } else if (l >= 2 && ((addr & 1) == 0)) {
2966
                    /* 16 bit read access */
2967
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2968
                    stw_p(buf, val);
2969
                    l = 2;
2970
                } else {
2971
                    /* 8 bit read access */
2972
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2973
                    stb_p(buf, val);
2974
                    l = 1;
2975
                }
2976
            } else {
2977
                /* RAM case */
2978
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2979
                    (addr & ~TARGET_PAGE_MASK);
2980
                memcpy(buf, ptr, l);
2981
            }
2982
        }
2983
        len -= l;
2984
        buf += l;
2985
        addr += l;
2986
    }
2987
}
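
/* Note: callers usually go through cpu_physical_memory_read() and
   cpu_physical_memory_write(), which are assumed to be thin wrappers
   around this function with is_write fixed to 0 or 1, e.g.
       uint32_t v;
       cpu_physical_memory_read(addr, (uint8_t *)&v, 4);
   as the ldub_phys()/lduw_phys() helpers below already do. */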
2988

    
2989
/* used for ROM loading : can write in RAM and ROM */
2990
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2991
                                   const uint8_t *buf, int len)
2992
{
2993
    int l;
2994
    uint8_t *ptr;
2995
    target_phys_addr_t page;
2996
    unsigned long pd;
2997
    PhysPageDesc *p;
2998

    
2999
    while (len > 0) {
3000
        page = addr & TARGET_PAGE_MASK;
3001
        l = (page + TARGET_PAGE_SIZE) - addr;
3002
        if (l > len)
3003
            l = len;
3004
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3005
        if (!p) {
3006
            pd = IO_MEM_UNASSIGNED;
3007
        } else {
3008
            pd = p->phys_offset;
3009
        }
3010

    
3011
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3012
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3013
            !(pd & IO_MEM_ROMD)) {
3014
            /* do nothing */
3015
        } else {
3016
            unsigned long addr1;
3017
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3018
            /* ROM/RAM case */
3019
            ptr = phys_ram_base + addr1;
3020
            memcpy(ptr, buf, l);
3021
        }
3022
        len -= l;
3023
        buf += l;
3024
        addr += l;
3025
    }
3026
}
3027

    
3028

    
3029
/* warning: addr must be aligned */
3030
uint32_t ldl_phys(target_phys_addr_t addr)
3031
{
3032
    int io_index;
3033
    uint8_t *ptr;
3034
    uint32_t val;
3035
    unsigned long pd;
3036
    PhysPageDesc *p;
3037

    
3038
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3039
    if (!p) {
3040
        pd = IO_MEM_UNASSIGNED;
3041
    } else {
3042
        pd = p->phys_offset;
3043
    }
3044

    
3045
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3046
        !(pd & IO_MEM_ROMD)) {
3047
        /* I/O case */
3048
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3049
        if (p)
3050
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3051
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3052
    } else {
3053
        /* RAM case */
3054
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3055
            (addr & ~TARGET_PAGE_MASK);
3056
        val = ldl_p(ptr);
3057
    }
3058
    return val;
3059
}
3060

    
3061
/* warning: addr must be aligned */
3062
uint64_t ldq_phys(target_phys_addr_t addr)
3063
{
3064
    int io_index;
3065
    uint8_t *ptr;
3066
    uint64_t val;
3067
    unsigned long pd;
3068
    PhysPageDesc *p;
3069

    
3070
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3071
    if (!p) {
3072
        pd = IO_MEM_UNASSIGNED;
3073
    } else {
3074
        pd = p->phys_offset;
3075
    }
3076

    
3077
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3078
        !(pd & IO_MEM_ROMD)) {
3079
        /* I/O case */
3080
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3081
        if (p)
3082
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3083
#ifdef TARGET_WORDS_BIGENDIAN
3084
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3085
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3086
#else
3087
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3088
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3089
#endif
3090
    } else {
3091
        /* RAM case */
3092
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3093
            (addr & ~TARGET_PAGE_MASK);
3094
        val = ldq_p(ptr);
3095
    }
3096
    return val;
3097
}
3098

    
3099
/* XXX: optimize */
3100
uint32_t ldub_phys(target_phys_addr_t addr)
3101
{
3102
    uint8_t val;
3103
    cpu_physical_memory_read(addr, &val, 1);
3104
    return val;
3105
}
3106

    
3107
/* XXX: optimize */
3108
uint32_t lduw_phys(target_phys_addr_t addr)
3109
{
3110
    uint16_t val;
3111
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3112
    return tswap16(val);
3113
}
3114

    
3115
/* warning: addr must be aligned. The ram page is not masked as dirty
3116
   and the code inside is not invalidated. It is useful if the dirty
3117
   bits are used to track modified PTEs */
3118
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3119
{
3120
    int io_index;
3121
    uint8_t *ptr;
3122
    unsigned long pd;
3123
    PhysPageDesc *p;
3124

    
3125
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3126
    if (!p) {
3127
        pd = IO_MEM_UNASSIGNED;
3128
    } else {
3129
        pd = p->phys_offset;
3130
    }
3131

    
3132
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3133
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3134
        if (p)
3135
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3136
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3137
    } else {
3138
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3139
        ptr = phys_ram_base + addr1;
3140
        stl_p(ptr, val);
3141

    
3142
        if (unlikely(in_migration)) {
3143
            if (!cpu_physical_memory_is_dirty(addr1)) {
3144
                /* invalidate code */
3145
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3146
                /* set dirty bit */
3147
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3148
                    (0xff & ~CODE_DIRTY_FLAG);
3149
            }
3150
        }
3151
    }
3152
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}
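
/* Note the contrast with stl_phys_notdirty above: for the RAM case,
   stl_phys also invalidates any translated code overlapping the stored
   word and sets the page's dirty flags (all but CODE_DIRTY_FLAG). */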

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
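
/* A minimal usage sketch (hypothetical helper, not part of the original
   file): a debugger front end could read a NUL-terminated string from
   guest virtual memory one byte at a time with cpu_memory_rw_debug,
   stopping at the first unmapped page.  The name and buffer handling are
   illustrative only. */
static inline int cpu_read_string_debug_example(CPUState *env,
                                                target_ulong addr,
                                                char *buf, int buf_size)
{
    int i;

    if (buf_size <= 0)
        return 0;
    for (i = 0; i < buf_size - 1; i++) {
        if (cpu_memory_rw_debug(env, addr + i, (uint8_t *)&buf[i], 1, 0) != 0)
            break;              /* unmapped page: return what we have */
        if (buf[i] == '\0')
            return i;           /* string fully read */
    }
    buf[i] = '\0';
    return i;
}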

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)
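
/* The includes below appear to instantiate the softmmu load helpers a
   second time for code fetches: MMUSUFFIX selects the _cmmu name suffix,
   SOFTMMU_CODE_ACCESS the code-access variant, and each SHIFT value
   (log2 of the access size in bytes) generates the 1, 2, 4 and 8 byte
   accessors. */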
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif