/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
46

    
47
//#define DEBUG_TB_INVALIDATE
48
//#define DEBUG_FLUSH
49
//#define DEBUG_TLB
50
//#define DEBUG_UNASSIGNED
51

    
52
/* make various TB consistency checks */
53
//#define DEBUG_TB_CHECK
54
//#define DEBUG_TLB_CHECK
55

    
56
//#define DEBUG_IOPORT
57
//#define DEBUG_SUBPAGE
58

    
59
#if !defined(CONFIG_USER_ONLY)
60
/* TB consistency checks only implemented for usermode emulation.  */
61
#undef DEBUG_TB_CHECK
62
#endif
63

    
64
#define SMC_BITMAP_USE_THRESHOLD 10
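/* SMC_BITMAP_USE_THRESHOLD: number of write accesses to a code page before
   a per-page bitmap of translated code is built, see
   tb_invalidate_phys_page_range() */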
65

    
66
#define MMAP_AREA_START        0x00000000
67
#define MMAP_AREA_END          0xa8000000
68

    
69
#if defined(TARGET_SPARC64)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 41
71
#elif defined(TARGET_SPARC)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 36
73
#elif defined(TARGET_ALPHA)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#define TARGET_VIRT_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_PPC64)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 42
80
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81
#define TARGET_PHYS_ADDR_SPACE_BITS 36
82
#else
83
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84
#define TARGET_PHYS_ADDR_SPACE_BITS 32
85
#endif
86

    
87
static TranslationBlock *tbs;
88
int code_gen_max_blocks;
89
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90
static int nb_tbs;
91
/* any access to the tbs or the page table must use this lock */
92
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93

    
94
#if defined(__arm__) || defined(__sparc_v9__)
95
/* The prologue must be reachable with a direct jump. ARM and Sparc64
96
 have limited branch ranges (possibly also PPC) so place it in a
97
 section close to the code segment. */
98
#define code_gen_section                                \
99
    __attribute__((__section__(".gen_code")))           \
100
    __attribute__((aligned (32)))
101
#else
102
#define code_gen_section                                \
103
    __attribute__((aligned (32)))
104
#endif
105

    
106
uint8_t code_gen_prologue[1024] code_gen_section;
107
static uint8_t *code_gen_buffer;
108
static unsigned long code_gen_buffer_size;
109
/* threshold to flush the translated code buffer */
110
static unsigned long code_gen_buffer_max_size;
111
uint8_t *code_gen_ptr;
112

    
113
#if !defined(CONFIG_USER_ONLY)
114
ram_addr_t phys_ram_size;
115
int phys_ram_fd;
116
uint8_t *phys_ram_base;
117
uint8_t *phys_ram_dirty;
118
static int in_migration;
119
static ram_addr_t phys_ram_alloc_offset = 0;
120
#endif
121

    
122
CPUState *first_cpu;
123
/* current CPU in the current thread. It is only valid inside
124
   cpu_exec() */
125
CPUState *cpu_single_env;
126
/* 0 = Do not count executed instructions.
127
   1 = Precise instruction counting.
128
   2 = Adaptive rate instruction counting.  */
129
int use_icount = 0;
130
/* Current instruction counter.  While executing translated code this may
131
   include some instructions that have not yet been executed.  */
132
int64_t qemu_icount;
133

    
134
typedef struct PageDesc {
135
    /* list of TBs intersecting this ram page */
136
    TranslationBlock *first_tb;
137
    /* in order to optimize self modifying code, we count the number
138
       of lookups we do to a given page to use a bitmap */
139
    unsigned int code_write_count;
140
    uint8_t *code_bitmap;
141
#if defined(CONFIG_USER_ONLY)
142
    unsigned long flags;
143
#endif
144
} PageDesc;
145

    
146
typedef struct PhysPageDesc {
147
    /* offset in host memory of the page + io_index in the low bits */
148
    ram_addr_t phys_offset;
149
} PhysPageDesc;
150

    
151
#define L2_BITS 10
152
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
153
/* XXX: this is a temporary hack for alpha target.
154
 *      In the future, this is to be replaced by a multi-level table
155
 *      to actually be able to handle the complete 64-bit address space.
156
 */
157
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
158
#else
159
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
160
#endif
161

    
162
#define L1_SIZE (1 << L1_BITS)
163
#define L2_SIZE (1 << L2_BITS)
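
/* A page index (address >> TARGET_PAGE_BITS) is looked up in two steps:
   the top L1_BITS select an entry of l1_map[], and the low L2_BITS index
   into the PageDesc array that entry points to.  For example, on a 32-bit
   target with 4 KB pages this is a 10/10/12 split of the virtual address. */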
164

    
165
unsigned long qemu_real_host_page_size;
166
unsigned long qemu_host_page_bits;
167
unsigned long qemu_host_page_size;
168
unsigned long qemu_host_page_mask;
169

    
170
/* XXX: for system emulation, it could just be an array */
171
static PageDesc *l1_map[L1_SIZE];
172
static PhysPageDesc **l1_phys_map;
173

    
174
#if !defined(CONFIG_USER_ONLY)
175
static void io_mem_init(void);
176

    
177
/* io memory support */
178
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
179
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
180
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
181
static int io_mem_nb;
182
static int io_mem_watch;
183
#endif
184

    
185
/* log support */
186
static const char *logfilename = "/tmp/qemu.log";
187
FILE *logfile;
188
int loglevel;
189
static int log_append = 0;
190

    
191
/* statistics */
192
static int tlb_flush_count;
193
static int tb_flush_count;
194
static int tb_phys_invalidate_count;
195

    
196
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
197
typedef struct subpage_t {
198
    target_phys_addr_t base;
199
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
200
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
201
    void *opaque[TARGET_PAGE_SIZE][2][4];
202
} subpage_t;
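
/* A subpage lets a single target page be split into regions with different
   I/O handlers: entries are indexed by the offset inside the page
   (SUBPAGE_IDX) and by access size, mirroring the io_mem_read /
   io_mem_write tables declared above. */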
203

    
204
#ifdef _WIN32
205
static void map_exec(void *addr, long size)
206
{
207
    DWORD old_protect;
208
    VirtualProtect(addr, size,
209
                   PAGE_EXECUTE_READWRITE, &old_protect);
210
    
211
}
212
#else
213
static void map_exec(void *addr, long size)
214
{
215
    unsigned long start, end, page_size;
216
    
217
    page_size = getpagesize();
218
    start = (unsigned long)addr;
219
    start &= ~(page_size - 1);
220
    
221
    end = (unsigned long)addr + size;
222
    end += page_size - 1;
223
    end &= ~(page_size - 1);
224
    
225
    mprotect((void *)start, end - start,
226
             PROT_READ | PROT_WRITE | PROT_EXEC);
227
}
228
#endif
229

    
230
static void page_init(void)
231
{
232
    /* NOTE: we can always assume that qemu_host_page_size >=
233
       TARGET_PAGE_SIZE */
234
#ifdef _WIN32
235
    {
236
        SYSTEM_INFO system_info;
237

    
238
        GetSystemInfo(&system_info);
239
        qemu_real_host_page_size = system_info.dwPageSize;
240
    }
241
#else
242
    qemu_real_host_page_size = getpagesize();
243
#endif
244
    if (qemu_host_page_size == 0)
245
        qemu_host_page_size = qemu_real_host_page_size;
246
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
247
        qemu_host_page_size = TARGET_PAGE_SIZE;
248
    qemu_host_page_bits = 0;
249
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250
        qemu_host_page_bits++;
251
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
252
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254

    
255
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256
    {
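        /* Mark every region the host has already mapped (as listed in
           /proc/self/maps) as PAGE_RESERVED so the guest cannot map over it. */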
257
        long long startaddr, endaddr;
258
        FILE *f;
259
        int n;
260

    
261
        mmap_lock();
262
        last_brk = (unsigned long)sbrk(0);
263
        f = fopen("/proc/self/maps", "r");
264
        if (f) {
265
            do {
266
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267
                if (n == 2) {
268
                    startaddr = MIN(startaddr,
269
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270
                    endaddr = MIN(endaddr,
271
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
273
                                   TARGET_PAGE_ALIGN(endaddr),
274
                                   PAGE_RESERVED); 
275
                }
276
            } while (!feof(f));
277
            fclose(f);
278
        }
279
        mmap_unlock();
280
    }
281
#endif
282
}
283

    
284
static inline PageDesc **page_l1_map(target_ulong index)
285
{
286
#if TARGET_LONG_BITS > 32
287
    /* Host memory outside guest VM.  For 32-bit targets we have already
288
       excluded high addresses.  */
289
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290
        return NULL;
291
#endif
292
    return &l1_map[index >> L2_BITS];
293
}
294

    
295
static inline PageDesc *page_find_alloc(target_ulong index)
296
{
297
    PageDesc **lp, *p;
298
    lp = page_l1_map(index);
299
    if (!lp)
300
        return NULL;
301

    
302
    p = *lp;
303
    if (!p) {
304
        /* allocate if not found */
305
#if defined(CONFIG_USER_ONLY)
306
        unsigned long addr;
307
        size_t len = sizeof(PageDesc) * L2_SIZE;
308
        /* Don't use qemu_malloc because it may recurse.  */
309
        p = mmap(0, len, PROT_READ | PROT_WRITE,
310
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
311
        *lp = p;
312
        addr = h2g(p);
313
        if (addr == (target_ulong)addr) {
314
            page_set_flags(addr & TARGET_PAGE_MASK,
315
                           TARGET_PAGE_ALIGN(addr + len),
316
                           PAGE_RESERVED); 
317
        }
318
#else
319
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320
        *lp = p;
321
#endif
322
    }
323
    return p + (index & (L2_SIZE - 1));
324
}
325

    
326
static inline PageDesc *page_find(target_ulong index)
327
{
328
    PageDesc **lp, *p;
329
    lp = page_l1_map(index);
330
    if (!lp)
331
        return NULL;
332

    
333
    p = *lp;
334
    if (!p)
335
        return 0;
336
    return p + (index & (L2_SIZE - 1));
337
}
338

    
339
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
340
{
341
    void **lp, **p;
342
    PhysPageDesc *pd;
343

    
344
    p = (void **)l1_phys_map;
345
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
346

    
347
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
349
#endif
350
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351
    p = *lp;
352
    if (!p) {
353
        /* allocate if not found */
354
        if (!alloc)
355
            return NULL;
356
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357
        memset(p, 0, sizeof(void *) * L1_SIZE);
358
        *lp = p;
359
    }
360
#endif
361
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362
    pd = *lp;
363
    if (!pd) {
364
        int i;
365
        /* allocate if not found */
366
        if (!alloc)
367
            return NULL;
368
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
369
        *lp = pd;
370
        for (i = 0; i < L2_SIZE; i++)
371
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
372
    }
373
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
374
}
375

    
376
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
377
{
378
    return phys_page_find_alloc(index, 0);
379
}
380

    
381
#if !defined(CONFIG_USER_ONLY)
382
static void tlb_protect_code(ram_addr_t ram_addr);
383
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384
                                    target_ulong vaddr);
385
#define mmap_lock() do { } while(0)
386
#define mmap_unlock() do { } while(0)
387
#endif
388

    
389
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390

    
391
#if defined(CONFIG_USER_ONLY)
392
/* Currently it is not recommended to allocate big chunks of data in
   user mode. This will change when a dedicated libc is used. */
394
#define USE_STATIC_CODE_GEN_BUFFER
395
#endif
396

    
397
#ifdef USE_STATIC_CODE_GEN_BUFFER
398
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399
#endif
400

    
401
static void code_gen_alloc(unsigned long tb_size)
402
{
403
#ifdef USE_STATIC_CODE_GEN_BUFFER
404
    code_gen_buffer = static_code_gen_buffer;
405
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406
    map_exec(code_gen_buffer, code_gen_buffer_size);
407
#else
408
    code_gen_buffer_size = tb_size;
409
    if (code_gen_buffer_size == 0) {
410
#if defined(CONFIG_USER_ONLY)
411
        /* in user mode, phys_ram_size is not meaningful */
412
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413
#else
414
        /* XXX: needs adjustments */
415
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
416
#endif
417
    }
418
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420
    /* The code gen buffer location may have constraints depending on
421
       the host cpu and OS */
422
#if defined(__linux__) 
423
    {
424
        int flags;
425
        void *start = NULL;
426

    
427
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
428
#if defined(__x86_64__)
429
        flags |= MAP_32BIT;
430
        /* Cannot map more than that */
431
        if (code_gen_buffer_size > (800 * 1024 * 1024))
432
            code_gen_buffer_size = (800 * 1024 * 1024);
433
#elif defined(__sparc_v9__)
434
        // Map the buffer below 2G, so we can use direct calls and branches
435
        flags |= MAP_FIXED;
436
        start = (void *) 0x60000000UL;
437
        if (code_gen_buffer_size > (512 * 1024 * 1024))
438
            code_gen_buffer_size = (512 * 1024 * 1024);
439
#endif
440
        code_gen_buffer = mmap(start, code_gen_buffer_size,
441
                               PROT_WRITE | PROT_READ | PROT_EXEC,
442
                               flags, -1, 0);
443
        if (code_gen_buffer == MAP_FAILED) {
444
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
445
            exit(1);
446
        }
447
    }
448
#elif defined(__FreeBSD__)
449
    {
450
        int flags;
451
        void *addr = NULL;
452
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
453
#if defined(__x86_64__)
454
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455
         * 0x40000000 is free */
456
        flags |= MAP_FIXED;
457
        addr = (void *)0x40000000;
458
        /* Cannot map more than that */
459
        if (code_gen_buffer_size > (800 * 1024 * 1024))
460
            code_gen_buffer_size = (800 * 1024 * 1024);
461
#endif
462
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
463
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
464
                               flags, -1, 0);
465
        if (code_gen_buffer == MAP_FAILED) {
466
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467
            exit(1);
468
        }
469
    }
470
#else
471
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
472
    if (!code_gen_buffer) {
473
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474
        exit(1);
475
    }
476
    map_exec(code_gen_buffer, code_gen_buffer_size);
477
#endif
478
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
479
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
480
    code_gen_buffer_max_size = code_gen_buffer_size - 
481
        code_gen_max_block_size();
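    /* keep one worst-case translation block of headroom so that the check in
       tb_alloc() guarantees code generation never runs past the buffer end */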
482
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
483
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
484
}
485

    
486
/* Must be called before using the QEMU cpus. 'tb_size' is the size
487
   (in bytes) allocated to the translation buffer. Zero means default
488
   size. */
489
void cpu_exec_init_all(unsigned long tb_size)
490
{
491
    cpu_gen_init();
492
    code_gen_alloc(tb_size);
493
    code_gen_ptr = code_gen_buffer;
494
    page_init();
495
#if !defined(CONFIG_USER_ONLY)
496
    io_mem_init();
497
#endif
498
}
499

    
500
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
501

    
502
#define CPU_COMMON_SAVE_VERSION 1
503

    
504
static void cpu_common_save(QEMUFile *f, void *opaque)
505
{
506
    CPUState *env = opaque;
507

    
508
    qemu_put_be32s(f, &env->halted);
509
    qemu_put_be32s(f, &env->interrupt_request);
510
}
511

    
512
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
513
{
514
    CPUState *env = opaque;
515

    
516
    if (version_id != CPU_COMMON_SAVE_VERSION)
517
        return -EINVAL;
518

    
519
    qemu_get_be32s(f, &env->halted);
520
    qemu_get_be32s(f, &env->interrupt_request);
521
    tlb_flush(env, 1);
522

    
523
    return 0;
524
}
525
#endif
526

    
527
void cpu_exec_init(CPUState *env)
528
{
529
    CPUState **penv;
530
    int cpu_index;
531

    
532
    env->next_cpu = NULL;
533
    penv = &first_cpu;
534
    cpu_index = 0;
535
    while (*penv != NULL) {
536
        penv = (CPUState **)&(*penv)->next_cpu;
537
        cpu_index++;
538
    }
539
    env->cpu_index = cpu_index;
540
    TAILQ_INIT(&env->breakpoints);
541
    TAILQ_INIT(&env->watchpoints);
542
    *penv = env;
543
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
544
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
545
                    cpu_common_save, cpu_common_load, env);
546
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
547
                    cpu_save, cpu_load, env);
548
#endif
549
}
550

    
551
static inline void invalidate_page_bitmap(PageDesc *p)
552
{
553
    if (p->code_bitmap) {
554
        qemu_free(p->code_bitmap);
555
        p->code_bitmap = NULL;
556
    }
557
    p->code_write_count = 0;
558
}
559

    
560
/* set to NULL all the 'first_tb' fields in all PageDescs */
561
static void page_flush_tb(void)
562
{
563
    int i, j;
564
    PageDesc *p;
565

    
566
    for(i = 0; i < L1_SIZE; i++) {
567
        p = l1_map[i];
568
        if (p) {
569
            for(j = 0; j < L2_SIZE; j++) {
570
                p->first_tb = NULL;
571
                invalidate_page_bitmap(p);
572
                p++;
573
            }
574
        }
575
    }
576
}
577

    
578
/* flush all the translation blocks */
579
/* XXX: tb_flush is currently not thread safe */
580
void tb_flush(CPUState *env1)
581
{
582
    CPUState *env;
583
#if defined(DEBUG_FLUSH)
584
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
585
           (unsigned long)(code_gen_ptr - code_gen_buffer),
586
           nb_tbs, nb_tbs > 0 ?
587
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
588
#endif
589
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
590
        cpu_abort(env1, "Internal error: code buffer overflow\n");
591

    
592
    nb_tbs = 0;
593

    
594
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
595
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
596
    }
597

    
598
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
599
    page_flush_tb();
600

    
601
    code_gen_ptr = code_gen_buffer;
602
    /* XXX: flush processor icache at this point if cache flush is
603
       expensive */
604
    tb_flush_count++;
605
}
606

    
607
#ifdef DEBUG_TB_CHECK
608

    
609
static void tb_invalidate_check(target_ulong address)
610
{
611
    TranslationBlock *tb;
612
    int i;
613
    address &= TARGET_PAGE_MASK;
614
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
615
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
616
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
617
                  address >= tb->pc + tb->size)) {
618
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
619
                       address, (long)tb->pc, tb->size);
620
            }
621
        }
622
    }
623
}
624

    
625
/* verify that all the pages have correct rights for code */
626
static void tb_page_check(void)
627
{
628
    TranslationBlock *tb;
629
    int i, flags1, flags2;
630

    
631
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
632
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
633
            flags1 = page_get_flags(tb->pc);
634
            flags2 = page_get_flags(tb->pc + tb->size - 1);
635
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
636
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
637
                       (long)tb->pc, tb->size, flags1, flags2);
638
            }
639
        }
640
    }
641
}
642

    
643
static void tb_jmp_check(TranslationBlock *tb)
644
{
645
    TranslationBlock *tb1;
646
    unsigned int n1;
647

    
648
    /* suppress any remaining jumps to this TB */
649
    tb1 = tb->jmp_first;
650
    for(;;) {
651
        n1 = (long)tb1 & 3;
652
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
653
        if (n1 == 2)
654
            break;
655
        tb1 = tb1->jmp_next[n1];
656
    }
657
    /* check end of list */
658
    if (tb1 != tb) {
659
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
660
    }
661
}
662

    
663
#endif
664

    
665
/* invalidate one TB */
666
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
667
                             int next_offset)
668
{
669
    TranslationBlock *tb1;
670
    for(;;) {
671
        tb1 = *ptb;
672
        if (tb1 == tb) {
673
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
674
            break;
675
        }
676
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
677
    }
678
}
679

    
680
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
681
{
682
    TranslationBlock *tb1;
683
    unsigned int n1;
684

    
685
    for(;;) {
686
        tb1 = *ptb;
687
        n1 = (long)tb1 & 3;
688
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
689
        if (tb1 == tb) {
690
            *ptb = tb1->page_next[n1];
691
            break;
692
        }
693
        ptb = &tb1->page_next[n1];
694
    }
695
}
696

    
697
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
698
{
699
    TranslationBlock *tb1, **ptb;
700
    unsigned int n1;
701

    
702
    ptb = &tb->jmp_next[n];
703
    tb1 = *ptb;
704
    if (tb1) {
705
        /* find tb(n) in circular list */
706
        for(;;) {
707
            tb1 = *ptb;
708
            n1 = (long)tb1 & 3;
709
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
710
            if (n1 == n && tb1 == tb)
711
                break;
712
            if (n1 == 2) {
713
                ptb = &tb1->jmp_first;
714
            } else {
715
                ptb = &tb1->jmp_next[n1];
716
            }
717
        }
718
        /* now we can suppress tb(n) from the list */
719
        *ptb = tb->jmp_next[n];
720

    
721
        tb->jmp_next[n] = NULL;
722
    }
723
}
724

    
725
/* reset the jump entry 'n' of a TB so that it is not chained to
726
   another TB */
727
static inline void tb_reset_jump(TranslationBlock *tb, int n)
728
{
729
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
730
}
731

    
732
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
733
{
734
    CPUState *env;
735
    PageDesc *p;
736
    unsigned int h, n1;
737
    target_phys_addr_t phys_pc;
738
    TranslationBlock *tb1, *tb2;
739

    
740
    /* remove the TB from the hash list */
741
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
742
    h = tb_phys_hash_func(phys_pc);
743
    tb_remove(&tb_phys_hash[h], tb,
744
              offsetof(TranslationBlock, phys_hash_next));
745

    
746
    /* remove the TB from the page list */
747
    if (tb->page_addr[0] != page_addr) {
748
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
749
        tb_page_remove(&p->first_tb, tb);
750
        invalidate_page_bitmap(p);
751
    }
752
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
753
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
754
        tb_page_remove(&p->first_tb, tb);
755
        invalidate_page_bitmap(p);
756
    }
757

    
758
    tb_invalidated_flag = 1;
759

    
760
    /* remove the TB from the hash list */
761
    h = tb_jmp_cache_hash_func(tb->pc);
762
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
763
        if (env->tb_jmp_cache[h] == tb)
764
            env->tb_jmp_cache[h] = NULL;
765
    }
766

    
767
    /* suppress this TB from the two jump lists */
768
    tb_jmp_remove(tb, 0);
769
    tb_jmp_remove(tb, 1);
770

    
771
    /* suppress any remaining jumps to this TB */
772
    tb1 = tb->jmp_first;
773
    for(;;) {
774
        n1 = (long)tb1 & 3;
775
        if (n1 == 2)
776
            break;
777
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
778
        tb2 = tb1->jmp_next[n1];
779
        tb_reset_jump(tb1, n1);
780
        tb1->jmp_next[n1] = NULL;
781
        tb1 = tb2;
782
    }
783
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
784

    
785
    tb_phys_invalidate_count++;
786
}
787

    
788
static inline void set_bits(uint8_t *tab, int start, int len)
789
{
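    /* Set bits [start, start + len) in the bit array 'tab', handling the
       partial bytes at both ends of the range. */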
790
    int end, mask, end1;
791

    
792
    end = start + len;
793
    tab += start >> 3;
794
    mask = 0xff << (start & 7);
795
    if ((start & ~7) == (end & ~7)) {
796
        if (start < end) {
797
            mask &= ~(0xff << (end & 7));
798
            *tab |= mask;
799
        }
800
    } else {
801
        *tab++ |= mask;
802
        start = (start + 8) & ~7;
803
        end1 = end & ~7;
804
        while (start < end1) {
805
            *tab++ = 0xff;
806
            start += 8;
807
        }
808
        if (start < end) {
809
            mask = ~(0xff << (end & 7));
810
            *tab |= mask;
811
        }
812
    }
813
}
814

    
815
static void build_page_bitmap(PageDesc *p)
816
{
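    /* Build a bitmap with one bit per byte of the page; a set bit means the
       byte belongs to at least one TB.  tb_invalidate_phys_page_fast() uses
       it to skip invalidation for writes that do not touch translated code. */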
817
    int n, tb_start, tb_end;
818
    TranslationBlock *tb;
819

    
820
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
821
    if (!p->code_bitmap)
822
        return;
823

    
824
    tb = p->first_tb;
825
    while (tb != NULL) {
826
        n = (long)tb & 3;
827
        tb = (TranslationBlock *)((long)tb & ~3);
828
        /* NOTE: this is subtle as a TB may span two physical pages */
829
        if (n == 0) {
830
            /* NOTE: tb_end may be after the end of the page, but
831
               it is not a problem */
832
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
833
            tb_end = tb_start + tb->size;
834
            if (tb_end > TARGET_PAGE_SIZE)
835
                tb_end = TARGET_PAGE_SIZE;
836
        } else {
837
            tb_start = 0;
838
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
839
        }
840
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
841
        tb = tb->page_next[n];
842
    }
843
}
844

    
845
TranslationBlock *tb_gen_code(CPUState *env,
846
                              target_ulong pc, target_ulong cs_base,
847
                              int flags, int cflags)
848
{
849
    TranslationBlock *tb;
850
    uint8_t *tc_ptr;
851
    target_ulong phys_pc, phys_page2, virt_page2;
852
    int code_gen_size;
853

    
854
    phys_pc = get_phys_addr_code(env, pc);
855
    tb = tb_alloc(pc);
856
    if (!tb) {
857
        /* flush must be done */
858
        tb_flush(env);
859
        /* cannot fail at this point */
860
        tb = tb_alloc(pc);
861
        /* Don't forget to invalidate previous TB info.  */
862
        tb_invalidated_flag = 1;
863
    }
864
    tc_ptr = code_gen_ptr;
865
    tb->tc_ptr = tc_ptr;
866
    tb->cs_base = cs_base;
867
    tb->flags = flags;
868
    tb->cflags = cflags;
869
    cpu_gen_code(env, tb, &code_gen_size);
870
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
871

    
872
    /* check next page if needed */
873
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
874
    phys_page2 = -1;
875
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
876
        phys_page2 = get_phys_addr_code(env, virt_page2);
877
    }
878
    tb_link_phys(tb, phys_pc, phys_page2);
879
    return tb;
880
}
881

    
882
/* invalidate all TBs which intersect with the target physical page
883
   starting in range [start, end). NOTE: start and end must refer to
884
   the same physical page. 'is_cpu_write_access' should be true if called
885
   from a real cpu write access: the virtual CPU will exit the current
886
   TB if code is modified inside this TB. */
887
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
888
                                   int is_cpu_write_access)
889
{
890
    TranslationBlock *tb, *tb_next, *saved_tb;
891
    CPUState *env = cpu_single_env;
892
    target_ulong tb_start, tb_end;
893
    PageDesc *p;
894
    int n;
895
#ifdef TARGET_HAS_PRECISE_SMC
896
    int current_tb_not_found = is_cpu_write_access;
897
    TranslationBlock *current_tb = NULL;
898
    int current_tb_modified = 0;
899
    target_ulong current_pc = 0;
900
    target_ulong current_cs_base = 0;
901
    int current_flags = 0;
902
#endif /* TARGET_HAS_PRECISE_SMC */
903

    
904
    p = page_find(start >> TARGET_PAGE_BITS);
905
    if (!p)
906
        return;
907
    if (!p->code_bitmap &&
908
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
909
        is_cpu_write_access) {
910
        /* build code bitmap */
911
        build_page_bitmap(p);
912
    }
913

    
914
    /* we remove all the TBs in the range [start, end) */
915
    /* XXX: see if in some cases it could be faster to invalidate all the code */
916
    tb = p->first_tb;
917
    while (tb != NULL) {
918
        n = (long)tb & 3;
919
        tb = (TranslationBlock *)((long)tb & ~3);
920
        tb_next = tb->page_next[n];
921
        /* NOTE: this is subtle as a TB may span two physical pages */
922
        if (n == 0) {
923
            /* NOTE: tb_end may be after the end of the page, but
924
               it is not a problem */
925
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
926
            tb_end = tb_start + tb->size;
927
        } else {
928
            tb_start = tb->page_addr[1];
929
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
930
        }
931
        if (!(tb_end <= start || tb_start >= end)) {
932
#ifdef TARGET_HAS_PRECISE_SMC
933
            if (current_tb_not_found) {
934
                current_tb_not_found = 0;
935
                current_tb = NULL;
936
                if (env->mem_io_pc) {
937
                    /* now we have a real cpu fault */
938
                    current_tb = tb_find_pc(env->mem_io_pc);
939
                }
940
            }
941
            if (current_tb == tb &&
942
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
943
                /* If we are modifying the current TB, we must stop
944
                its execution. We could be more precise by checking
945
                that the modification is after the current PC, but it
946
                would require a specialized function to partially
947
                restore the CPU state */
948

    
949
                current_tb_modified = 1;
950
                cpu_restore_state(current_tb, env,
951
                                  env->mem_io_pc, NULL);
952
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
953
                                     &current_flags);
954
            }
955
#endif /* TARGET_HAS_PRECISE_SMC */
956
            /* we need to do that to handle the case where a signal
957
               occurs while doing tb_phys_invalidate() */
958
            saved_tb = NULL;
959
            if (env) {
960
                saved_tb = env->current_tb;
961
                env->current_tb = NULL;
962
            }
963
            tb_phys_invalidate(tb, -1);
964
            if (env) {
965
                env->current_tb = saved_tb;
966
                if (env->interrupt_request && env->current_tb)
967
                    cpu_interrupt(env, env->interrupt_request);
968
            }
969
        }
970
        tb = tb_next;
971
    }
972
#if !defined(CONFIG_USER_ONLY)
973
    /* if no code remaining, no need to continue to use slow writes */
974
    if (!p->first_tb) {
975
        invalidate_page_bitmap(p);
976
        if (is_cpu_write_access) {
977
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
978
        }
979
    }
980
#endif
981
#ifdef TARGET_HAS_PRECISE_SMC
982
    if (current_tb_modified) {
983
        /* we generate a block containing just the instruction
984
           modifying the memory. It will ensure that it cannot modify
985
           itself */
986
        env->current_tb = NULL;
987
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
988
        cpu_resume_from_signal(env, NULL);
989
    }
990
#endif
991
}
992

    
993
/* len must be <= 8 and start must be a multiple of len */
994
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
995
{
996
    PageDesc *p;
997
    int offset, b;
998
#if 0
999
    if (1) {
1000
        if (loglevel) {
1001
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1002
                   cpu_single_env->mem_io_vaddr, len,
1003
                   cpu_single_env->eip,
1004
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1005
        }
1006
    }
1007
#endif
1008
    p = page_find(start >> TARGET_PAGE_BITS);
1009
    if (!p)
1010
        return;
1011
    if (p->code_bitmap) {
1012
        offset = start & ~TARGET_PAGE_MASK;
1013
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
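        /* 'b' now has the code bitmap bits for the bytes starting at 'start'
           in its low bits; any of the low 'len' bits set means translated
           code is being overwritten */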
1014
        if (b & ((1 << len) - 1))
1015
            goto do_invalidate;
1016
    } else {
1017
    do_invalidate:
1018
        tb_invalidate_phys_page_range(start, start + len, 1);
1019
    }
1020
}
1021

    
1022
#if !defined(CONFIG_SOFTMMU)
1023
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1024
                                    unsigned long pc, void *puc)
1025
{
1026
    TranslationBlock *tb;
1027
    PageDesc *p;
1028
    int n;
1029
#ifdef TARGET_HAS_PRECISE_SMC
1030
    TranslationBlock *current_tb = NULL;
1031
    CPUState *env = cpu_single_env;
1032
    int current_tb_modified = 0;
1033
    target_ulong current_pc = 0;
1034
    target_ulong current_cs_base = 0;
1035
    int current_flags = 0;
1036
#endif
1037

    
1038
    addr &= TARGET_PAGE_MASK;
1039
    p = page_find(addr >> TARGET_PAGE_BITS);
1040
    if (!p)
1041
        return;
1042
    tb = p->first_tb;
1043
#ifdef TARGET_HAS_PRECISE_SMC
1044
    if (tb && pc != 0) {
1045
        current_tb = tb_find_pc(pc);
1046
    }
1047
#endif
1048
    while (tb != NULL) {
1049
        n = (long)tb & 3;
1050
        tb = (TranslationBlock *)((long)tb & ~3);
1051
#ifdef TARGET_HAS_PRECISE_SMC
1052
        if (current_tb == tb &&
1053
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1054
                /* If we are modifying the current TB, we must stop
1055
                   its execution. We could be more precise by checking
1056
                   that the modification is after the current PC, but it
1057
                   would require a specialized function to partially
1058
                   restore the CPU state */
1059

    
1060
            current_tb_modified = 1;
1061
            cpu_restore_state(current_tb, env, pc, puc);
1062
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1063
                                 &current_flags);
1064
        }
1065
#endif /* TARGET_HAS_PRECISE_SMC */
1066
        tb_phys_invalidate(tb, addr);
1067
        tb = tb->page_next[n];
1068
    }
1069
    p->first_tb = NULL;
1070
#ifdef TARGET_HAS_PRECISE_SMC
1071
    if (current_tb_modified) {
1072
        /* we generate a block containing just the instruction
1073
           modifying the memory. It will ensure that it cannot modify
1074
           itself */
1075
        env->current_tb = NULL;
1076
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1077
        cpu_resume_from_signal(env, puc);
1078
    }
1079
#endif
1080
}
1081
#endif
1082

    
1083
/* add the tb in the target page and protect it if necessary */
1084
static inline void tb_alloc_page(TranslationBlock *tb,
1085
                                 unsigned int n, target_ulong page_addr)
1086
{
1087
    PageDesc *p;
1088
    TranslationBlock *last_first_tb;
1089

    
1090
    tb->page_addr[n] = page_addr;
1091
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1092
    tb->page_next[n] = p->first_tb;
1093
    last_first_tb = p->first_tb;
1094
    p->first_tb = (TranslationBlock *)((long)tb | n);
1095
    invalidate_page_bitmap(p);
1096

    
1097
#if defined(TARGET_HAS_SMC) || 1
1098

    
1099
#if defined(CONFIG_USER_ONLY)
1100
    if (p->flags & PAGE_WRITE) {
1101
        target_ulong addr;
1102
        PageDesc *p2;
1103
        int prot;
1104

    
1105
        /* force the host page as non writable (writes will have a
1106
           page fault + mprotect overhead) */
1107
        page_addr &= qemu_host_page_mask;
1108
        prot = 0;
1109
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1110
            addr += TARGET_PAGE_SIZE) {
1111

    
1112
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1113
            if (!p2)
1114
                continue;
1115
            prot |= p2->flags;
1116
            p2->flags &= ~PAGE_WRITE;
1117
            page_get_flags(addr);
1118
          }
1119
        mprotect(g2h(page_addr), qemu_host_page_size,
1120
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1121
#ifdef DEBUG_TB_INVALIDATE
1122
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1123
               page_addr);
1124
#endif
1125
    }
1126
#else
1127
    /* if some code is already present, then the pages are already
1128
       protected. So we handle the case where only the first TB is
1129
       allocated in a physical page */
1130
    if (!last_first_tb) {
1131
        tlb_protect_code(page_addr);
1132
    }
1133
#endif
1134

    
1135
#endif /* TARGET_HAS_SMC */
1136
}
1137

    
1138
/* Allocate a new translation block. Flush the translation buffer if
1139
   too many translation blocks or too much generated code. */
1140
TranslationBlock *tb_alloc(target_ulong pc)
1141
{
1142
    TranslationBlock *tb;
1143

    
1144
    if (nb_tbs >= code_gen_max_blocks ||
1145
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1146
        return NULL;
1147
    tb = &tbs[nb_tbs++];
1148
    tb->pc = pc;
1149
    tb->cflags = 0;
1150
    return tb;
1151
}
1152

    
1153
void tb_free(TranslationBlock *tb)
1154
{
1155
    /* In practice this is mostly used for single-use temporary TBs.
1156
       Ignore the hard cases and just back up if this TB happens to
1157
       be the last one generated.  */
1158
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1159
        code_gen_ptr = tb->tc_ptr;
1160
        nb_tbs--;
1161
    }
1162
}
1163

    
1164
/* add a new TB and link it to the physical page tables. phys_page2 is
1165
   (-1) to indicate that only one page contains the TB. */
1166
void tb_link_phys(TranslationBlock *tb,
1167
                  target_ulong phys_pc, target_ulong phys_page2)
1168
{
1169
    unsigned int h;
1170
    TranslationBlock **ptb;
1171

    
1172
    /* Grab the mmap lock to stop another thread invalidating this TB
1173
       before we are done.  */
1174
    mmap_lock();
1175
    /* add in the physical hash table */
1176
    h = tb_phys_hash_func(phys_pc);
1177
    ptb = &tb_phys_hash[h];
1178
    tb->phys_hash_next = *ptb;
1179
    *ptb = tb;
1180

    
1181
    /* add in the page list */
1182
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1183
    if (phys_page2 != -1)
1184
        tb_alloc_page(tb, 1, phys_page2);
1185
    else
1186
        tb->page_addr[1] = -1;
1187

    
1188
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1189
    tb->jmp_next[0] = NULL;
1190
    tb->jmp_next[1] = NULL;
1191

    
1192
    /* init original jump addresses */
1193
    if (tb->tb_next_offset[0] != 0xffff)
1194
        tb_reset_jump(tb, 0);
1195
    if (tb->tb_next_offset[1] != 0xffff)
1196
        tb_reset_jump(tb, 1);
1197

    
1198
#ifdef DEBUG_TB_CHECK
1199
    tb_page_check();
1200
#endif
1201
    mmap_unlock();
1202
}
1203

    
1204
/* find the TB 'tb' such that tb->tc_ptr <= tc_ptr <
   (tb + 1)->tc_ptr. Return NULL if not found */
1206
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1207
{
1208
    int m_min, m_max, m;
1209
    unsigned long v;
1210
    TranslationBlock *tb;
1211

    
1212
    if (nb_tbs <= 0)
1213
        return NULL;
1214
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1215
        tc_ptr >= (unsigned long)code_gen_ptr)
1216
        return NULL;
1217
    /* binary search (cf Knuth) */
1218
    m_min = 0;
1219
    m_max = nb_tbs - 1;
1220
    while (m_min <= m_max) {
1221
        m = (m_min + m_max) >> 1;
1222
        tb = &tbs[m];
1223
        v = (unsigned long)tb->tc_ptr;
1224
        if (v == tc_ptr)
1225
            return tb;
1226
        else if (tc_ptr < v) {
1227
            m_max = m - 1;
1228
        } else {
1229
            m_min = m + 1;
1230
        }
1231
    }
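    /* no exact match: tbs[m_max] is now the last TB whose tc_ptr is below
       tc_ptr, i.e. the TB that contains it */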
1232
    return &tbs[m_max];
1233
}
1234

    
1235
static void tb_reset_jump_recursive(TranslationBlock *tb);
1236

    
1237
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1238
{
1239
    TranslationBlock *tb1, *tb_next, **ptb;
1240
    unsigned int n1;
1241

    
1242
    tb1 = tb->jmp_next[n];
1243
    if (tb1 != NULL) {
1244
        /* find head of list */
1245
        for(;;) {
1246
            n1 = (long)tb1 & 3;
1247
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1248
            if (n1 == 2)
1249
                break;
1250
            tb1 = tb1->jmp_next[n1];
1251
        }
1252
        /* we are now sure that tb jumps to tb1 */
1253
        tb_next = tb1;
1254

    
1255
        /* remove tb from the jmp_first list */
1256
        ptb = &tb_next->jmp_first;
1257
        for(;;) {
1258
            tb1 = *ptb;
1259
            n1 = (long)tb1 & 3;
1260
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1261
            if (n1 == n && tb1 == tb)
1262
                break;
1263
            ptb = &tb1->jmp_next[n1];
1264
        }
1265
        *ptb = tb->jmp_next[n];
1266
        tb->jmp_next[n] = NULL;
1267

    
1268
        /* suppress the jump to next tb in generated code */
1269
        tb_reset_jump(tb, n);
1270

    
1271
        /* suppress jumps in the tb on which we could have jumped */
1272
        tb_reset_jump_recursive(tb_next);
1273
    }
1274
}
1275

    
1276
static void tb_reset_jump_recursive(TranslationBlock *tb)
1277
{
1278
    tb_reset_jump_recursive2(tb, 0);
1279
    tb_reset_jump_recursive2(tb, 1);
1280
}
1281

    
1282
#if defined(TARGET_HAS_ICE)
1283
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1284
{
1285
    target_phys_addr_t addr;
1286
    target_ulong pd;
1287
    ram_addr_t ram_addr;
1288
    PhysPageDesc *p;
1289

    
1290
    addr = cpu_get_phys_page_debug(env, pc);
1291
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1292
    if (!p) {
1293
        pd = IO_MEM_UNASSIGNED;
1294
    } else {
1295
        pd = p->phys_offset;
1296
    }
1297
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1298
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1299
}
1300
#endif
1301

    
1302
/* Add a watchpoint.  */
1303
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1304
                          int flags, CPUWatchpoint **watchpoint)
1305
{
1306
    target_ulong len_mask = ~(len - 1);
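    /* e.g. len == 4 gives len_mask == ~3, so the alignment check below
       rejects any addr that is not a multiple of len */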
1307
    CPUWatchpoint *wp;
1308

    
1309
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1310
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1311
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1312
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1313
        return -EINVAL;
1314
    }
1315
    wp = qemu_malloc(sizeof(*wp));
1316
    if (!wp)
1317
        return -ENOMEM;
1318

    
1319
    wp->vaddr = addr;
1320
    wp->len_mask = len_mask;
1321
    wp->flags = flags;
1322

    
1323
    /* keep all GDB-injected watchpoints in front */
1324
    if (flags & BP_GDB)
1325
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1326
    else
1327
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1328

    
1329
    tlb_flush_page(env, addr);
1330

    
1331
    if (watchpoint)
1332
        *watchpoint = wp;
1333
    return 0;
1334
}
1335

    
1336
/* Remove a specific watchpoint.  */
1337
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1338
                          int flags)
1339
{
1340
    target_ulong len_mask = ~(len - 1);
1341
    CPUWatchpoint *wp;
1342

    
1343
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1344
        if (addr == wp->vaddr && len_mask == wp->len_mask
1345
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1346
            cpu_watchpoint_remove_by_ref(env, wp);
1347
            return 0;
1348
        }
1349
    }
1350
    return -ENOENT;
1351
}
1352

    
1353
/* Remove a specific watchpoint by reference.  */
1354
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1355
{
1356
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1357

    
1358
    tlb_flush_page(env, watchpoint->vaddr);
1359

    
1360
    qemu_free(watchpoint);
1361
}
1362

    
1363
/* Remove all matching watchpoints.  */
1364
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1365
{
1366
    CPUWatchpoint *wp, *next;
1367

    
1368
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1369
        if (wp->flags & mask)
1370
            cpu_watchpoint_remove_by_ref(env, wp);
1371
    }
1372
}
1373

    
1374
/* Add a breakpoint.  */
1375
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1376
                          CPUBreakpoint **breakpoint)
1377
{
1378
#if defined(TARGET_HAS_ICE)
1379
    CPUBreakpoint *bp;
1380

    
1381
    bp = qemu_malloc(sizeof(*bp));
1382
    if (!bp)
1383
        return -ENOMEM;
1384

    
1385
    bp->pc = pc;
1386
    bp->flags = flags;
1387

    
1388
    /* keep all GDB-injected breakpoints in front */
1389
    if (flags & BP_GDB)
1390
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1391
    else
1392
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1393

    
1394
    breakpoint_invalidate(env, pc);
1395

    
1396
    if (breakpoint)
1397
        *breakpoint = bp;
1398
    return 0;
1399
#else
1400
    return -ENOSYS;
1401
#endif
1402
}
1403

    
1404
/* Remove a specific breakpoint.  */
1405
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1406
{
1407
#if defined(TARGET_HAS_ICE)
1408
    CPUBreakpoint *bp;
1409

    
1410
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1411
        if (bp->pc == pc && bp->flags == flags) {
1412
            cpu_breakpoint_remove_by_ref(env, bp);
1413
            return 0;
1414
        }
1415
    }
1416
    return -ENOENT;
1417
#else
1418
    return -ENOSYS;
1419
#endif
1420
}
1421

    
1422
/* Remove a specific breakpoint by reference.  */
1423
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1424
{
1425
#if defined(TARGET_HAS_ICE)
1426
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1427

    
1428
    breakpoint_invalidate(env, breakpoint->pc);
1429

    
1430
    qemu_free(breakpoint);
1431
#endif
1432
}
1433

    
1434
/* Remove all matching breakpoints. */
1435
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1436
{
1437
#if defined(TARGET_HAS_ICE)
1438
    CPUBreakpoint *bp, *next;
1439

    
1440
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1441
        if (bp->flags & mask)
1442
            cpu_breakpoint_remove_by_ref(env, bp);
1443
    }
1444
#endif
1445
}
1446

    
1447
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1448
   CPU loop after each instruction */
1449
void cpu_single_step(CPUState *env, int enabled)
1450
{
1451
#if defined(TARGET_HAS_ICE)
1452
    if (env->singlestep_enabled != enabled) {
1453
        env->singlestep_enabled = enabled;
1454
        /* must flush all the translated code to avoid inconsistencies */
1455
        /* XXX: only flush what is necessary */
1456
        tb_flush(env);
1457
    }
1458
#endif
1459
}
1460

    
1461
/* enable or disable low level logging */
1462
void cpu_set_log(int log_flags)
1463
{
1464
    loglevel = log_flags;
1465
    if (loglevel && !logfile) {
1466
        logfile = fopen(logfilename, log_append ? "a" : "w");
1467
        if (!logfile) {
1468
            perror(logfilename);
1469
            _exit(1);
1470
        }
1471
#if !defined(CONFIG_SOFTMMU)
1472
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1473
        {
1474
            static char logfile_buf[4096];
1475
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1476
        }
1477
#else
1478
        setvbuf(logfile, NULL, _IOLBF, 0);
1479
#endif
1480
        log_append = 1;
1481
    }
1482
    if (!loglevel && logfile) {
1483
        fclose(logfile);
1484
        logfile = NULL;
1485
    }
1486
}
1487

    
1488
void cpu_set_log_filename(const char *filename)
1489
{
1490
    logfilename = strdup(filename);
1491
    if (logfile) {
1492
        fclose(logfile);
1493
        logfile = NULL;
1494
    }
1495
    cpu_set_log(loglevel);
1496
}
1497

    
1498
/* mask must never be zero, except for A20 change call */
1499
void cpu_interrupt(CPUState *env, int mask)
1500
{
1501
#if !defined(USE_NPTL)
1502
    TranslationBlock *tb;
1503
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1504
#endif
1505
    int old_mask;
1506

    
1507
    old_mask = env->interrupt_request;
1508
    /* FIXME: This is probably not threadsafe.  A different thread could
1509
       be in the middle of a read-modify-write operation.  */
1510
    env->interrupt_request |= mask;
1511
#if defined(USE_NPTL)
1512
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1513
       problem and hope the cpu will stop of its own accord.  For userspace
1514
       emulation this often isn't actually as bad as it sounds.  Often
1515
       signals are used primarily to interrupt blocking syscalls.  */
1516
#else
1517
    if (use_icount) {
1518
        env->icount_decr.u16.high = 0xffff;
1519
#ifndef CONFIG_USER_ONLY
1520
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1521
           an async event happened and we need to process it.  */
1522
        if (!can_do_io(env)
1523
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1524
            cpu_abort(env, "Raised interrupt while not in I/O function");
1525
        }
1526
#endif
1527
    } else {
1528
        tb = env->current_tb;
1529
        /* if the cpu is currently executing code, we must unlink it and
1530
           all the potentially executing TB */
1531
        if (tb && !testandset(&interrupt_lock)) {
1532
            env->current_tb = NULL;
1533
            tb_reset_jump_recursive(tb);
1534
            resetlock(&interrupt_lock);
1535
        }
1536
    }
1537
#endif
1538
}
1539

    
1540
void cpu_reset_interrupt(CPUState *env, int mask)
1541
{
1542
    env->interrupt_request &= ~mask;
1543
}
1544

    
1545
const CPULogItem cpu_log_items[] = {
1546
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1547
      "show generated host assembly code for each compiled TB" },
1548
    { CPU_LOG_TB_IN_ASM, "in_asm",
1549
      "show target assembly code for each compiled TB" },
1550
    { CPU_LOG_TB_OP, "op",
1551
      "show micro ops for each compiled TB" },
1552
    { CPU_LOG_TB_OP_OPT, "op_opt",
1553
      "show micro ops "
1554
#ifdef TARGET_I386
1555
      "before eflags optimization and "
1556
#endif
1557
      "after liveness analysis" },
1558
    { CPU_LOG_INT, "int",
1559
      "show interrupts/exceptions in short format" },
1560
    { CPU_LOG_EXEC, "exec",
1561
      "show trace before each executed TB (lots of logs)" },
1562
    { CPU_LOG_TB_CPU, "cpu",
1563
      "show CPU state before block translation" },
1564
#ifdef TARGET_I386
1565
    { CPU_LOG_PCALL, "pcall",
1566
      "show protected mode far calls/returns/exceptions" },
1567
#endif
1568
#ifdef DEBUG_IOPORT
1569
    { CPU_LOG_IOPORT, "ioport",
1570
      "show all i/o ports accesses" },
1571
#endif
1572
    { 0, NULL, NULL },
1573
};
1574

    
1575
static int cmp1(const char *s1, int n, const char *s2)
1576
{
1577
    if (strlen(s2) != n)
1578
        return 0;
1579
    return memcmp(s1, s2, n) == 0;
1580
}
1581

    
1582
/* takes a comma-separated list of log masks. Returns 0 on error. */
1583
int cpu_str_to_log_mask(const char *str)
1584
{
1585
    const CPULogItem *item;
1586
    int mask;
1587
    const char *p, *p1;
1588

    
1589
    p = str;
1590
    mask = 0;
1591
    for(;;) {
1592
        p1 = strchr(p, ',');
1593
        if (!p1)
1594
            p1 = p + strlen(p);
1595
        if(cmp1(p,p1-p,"all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
1606
    found:
1607
        mask |= item->mask;
1608
        if (*p1 != ',')
1609
            break;
1610
        p = p1 + 1;
1611
    }
1612
    return mask;
1613
}
1614

    
1615
void cpu_abort(CPUState *env, const char *fmt, ...)
1616
{
1617
    va_list ap;
1618
    va_list ap2;
1619

    
1620
    va_start(ap, fmt);
1621
    va_copy(ap2, ap);
1622
    fprintf(stderr, "qemu: fatal: ");
1623
    vfprintf(stderr, fmt, ap);
1624
    fprintf(stderr, "\n");
1625
#ifdef TARGET_I386
1626
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1627
#else
1628
    cpu_dump_state(env, stderr, fprintf, 0);
1629
#endif
1630
    if (logfile) {
1631
        fprintf(logfile, "qemu: fatal: ");
1632
        vfprintf(logfile, fmt, ap2);
1633
        fprintf(logfile, "\n");
1634
#ifdef TARGET_I386
1635
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1636
#else
1637
        cpu_dump_state(env, logfile, fprintf, 0);
1638
#endif
1639
        fflush(logfile);
1640
        fclose(logfile);
1641
    }
1642
    va_end(ap2);
1643
    va_end(ap);
1644
    abort();
1645
}
1646

    
1647
CPUState *cpu_copy(CPUState *env)
1648
{
1649
    CPUState *new_env = cpu_init(env->cpu_model_str);
1650
    /* preserve chaining and index */
1651
    CPUState *next_cpu = new_env->next_cpu;
1652
    int cpu_index = new_env->cpu_index;
1653
    memcpy(new_env, env, sizeof(CPUState));
1654
    new_env->next_cpu = next_cpu;
1655
    new_env->cpu_index = cpu_index;
1656
    return new_env;
1657
}
1658

    
1659
#if !defined(CONFIG_USER_ONLY)
1660

    
1661
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1662
{
1663
    unsigned int i;
1664

    
1665
    /* Discard jump cache entries for any tb which might potentially
1666
       overlap the flushed page.  */
1667
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1668
    memset (&env->tb_jmp_cache[i], 0, 
1669
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1670

    
1671
    i = tb_jmp_cache_hash_page(addr);
1672
    memset (&env->tb_jmp_cache[i], 0, 
1673
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1674
}
1675

    
1676
/* NOTE: if flush_global is true, also flush global entries (not
1677
   implemented yet) */
1678
void tlb_flush(CPUState *env, int flush_global)
1679
{
1680
    int i;
1681

    
1682
#if defined(DEBUG_TLB)
1683
    printf("tlb_flush:\n");
1684
#endif
1685
    /* must reset current TB so that interrupts cannot modify the
1686
       links while we are modifying them */
1687
    env->current_tb = NULL;
1688

    
1689
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1690
        env->tlb_table[0][i].addr_read = -1;
1691
        env->tlb_table[0][i].addr_write = -1;
1692
        env->tlb_table[0][i].addr_code = -1;
1693
        env->tlb_table[1][i].addr_read = -1;
1694
        env->tlb_table[1][i].addr_write = -1;
1695
        env->tlb_table[1][i].addr_code = -1;
1696
#if (NB_MMU_MODES >= 3)
1697
        env->tlb_table[2][i].addr_read = -1;
1698
        env->tlb_table[2][i].addr_write = -1;
1699
        env->tlb_table[2][i].addr_code = -1;
1700
#if (NB_MMU_MODES == 4)
1701
        env->tlb_table[3][i].addr_read = -1;
1702
        env->tlb_table[3][i].addr_write = -1;
1703
        env->tlb_table[3][i].addr_code = -1;
1704
#endif
1705
#endif
1706
    }
1707

    
1708
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1709

    
1710
#ifdef USE_KQEMU
1711
    if (env->kqemu_enabled) {
1712
        kqemu_flush(env, flush_global);
1713
    }
1714
#endif
1715
    tlb_flush_count++;
1716
}
1717

    
1718
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1719
{
1720
    if (addr == (tlb_entry->addr_read &
1721
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1722
        addr == (tlb_entry->addr_write &
1723
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1724
        addr == (tlb_entry->addr_code &
1725
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1726
        tlb_entry->addr_read = -1;
1727
        tlb_entry->addr_write = -1;
1728
        tlb_entry->addr_code = -1;
1729
    }
1730
}
1731

    
1732
void tlb_flush_page(CPUState *env, target_ulong addr)
1733
{
1734
    int i;
1735

    
1736
#if defined(DEBUG_TLB)
1737
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1738
#endif
1739
    /* must reset current TB so that interrupts cannot modify the
1740
       links while we are modifying them */
1741
    env->current_tb = NULL;
1742

    
1743
    addr &= TARGET_PAGE_MASK;
1744
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1745
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1746
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1747
#if (NB_MMU_MODES >= 3)
1748
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1749
#if (NB_MMU_MODES == 4)
1750
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1751
#endif
1752
#endif
1753

    
1754
    tlb_flush_jmp_cache(env, addr);
1755

    
1756
#ifdef USE_KQEMU
1757
    if (env->kqemu_enabled) {
1758
        kqemu_flush_page(env, addr);
1759
    }
1760
#endif
1761
}
1762

    
1763
/* update the TLBs so that writes to code in the virtual page 'addr'
1764
   can be detected */
1765
static void tlb_protect_code(ram_addr_t ram_addr)
1766
{
1767
    cpu_physical_memory_reset_dirty(ram_addr,
1768
                                    ram_addr + TARGET_PAGE_SIZE,
1769
                                    CODE_DIRTY_FLAG);
1770
}
1771

    
1772
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1773
   tested for self modifying code */
1774
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1775
                                    target_ulong vaddr)
1776
{
1777
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1778
}
1779

    
1780
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1781
                                         unsigned long start, unsigned long length)
1782
{
1783
    unsigned long addr;
1784
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1785
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1786
        if ((addr - start) < length) {
1787
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1788
        }
1789
    }
1790
}
1791

    
1792
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1793
                                     int dirty_flags)
1794
{
1795
    CPUState *env;
1796
    unsigned long length, start1;
1797
    int i, mask, len;
1798
    uint8_t *p;
1799

    
1800
    start &= TARGET_PAGE_MASK;
1801
    end = TARGET_PAGE_ALIGN(end);
1802

    
1803
    length = end - start;
1804
    if (length == 0)
1805
        return;
1806
    len = length >> TARGET_PAGE_BITS;
1807
#ifdef USE_KQEMU
1808
    /* XXX: should not depend on cpu context */
1809
    env = first_cpu;
1810
    if (env->kqemu_enabled) {
1811
        ram_addr_t addr;
1812
        addr = start;
1813
        for(i = 0; i < len; i++) {
1814
            kqemu_set_notdirty(env, addr);
1815
            addr += TARGET_PAGE_SIZE;
1816
        }
1817
    }
1818
#endif
1819
    mask = ~dirty_flags;
1820
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1821
    for(i = 0; i < len; i++)
1822
        p[i] &= mask;
1823

    
1824
    /* we modify the TLB cache so that the dirty bit will be set again
1825
       when accessing the range */
1826
    start1 = start + (unsigned long)phys_ram_base;
1827
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1828
        for(i = 0; i < CPU_TLB_SIZE; i++)
1829
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1830
        for(i = 0; i < CPU_TLB_SIZE; i++)
1831
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1832
#if (NB_MMU_MODES >= 3)
1833
        for(i = 0; i < CPU_TLB_SIZE; i++)
1834
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1835
#if (NB_MMU_MODES == 4)
1836
        for(i = 0; i < CPU_TLB_SIZE; i++)
1837
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1838
#endif
1839
#endif
1840
    }
1841
}
1842
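/* Sketch of a typical consumer of the dirty bitmap (illustrative, not part
   of this file): display emulation scans its framebuffer pages, redraws the
   ones whose VGA dirty bit is set, then clears the bit for the next frame.
   cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG are assumed to come
   from cpu-all.h; redraw_page() is a hypothetical helper.

       for (addr = fb_start; addr < fb_end; addr += TARGET_PAGE_SIZE) {
           if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG))
               redraw_page(addr);
       }
       cpu_physical_memory_reset_dirty(fb_start, fb_end, VGA_DIRTY_FLAG);
*/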

    
1843
int cpu_physical_memory_set_dirty_tracking(int enable)
1844
{
1845
    in_migration = enable;
1846
    return 0;
1847
}
1848

    
1849
int cpu_physical_memory_get_dirty_tracking(void)
1850
{
1851
    return in_migration;
1852
}
1853

    
1854
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1855
{
1856
    if (kvm_enabled())
1857
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1858
}
1859

    
1860
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1861
{
1862
    ram_addr_t ram_addr;
1863

    
1864
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1865
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1866
            tlb_entry->addend - (unsigned long)phys_ram_base;
1867
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1868
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1869
        }
1870
    }
1871
}
1872

    
1873
/* update the TLB according to the current state of the dirty bits */
1874
void cpu_tlb_update_dirty(CPUState *env)
1875
{
1876
    int i;
1877
    for(i = 0; i < CPU_TLB_SIZE; i++)
1878
        tlb_update_dirty(&env->tlb_table[0][i]);
1879
    for(i = 0; i < CPU_TLB_SIZE; i++)
1880
        tlb_update_dirty(&env->tlb_table[1][i]);
1881
#if (NB_MMU_MODES >= 3)
1882
    for(i = 0; i < CPU_TLB_SIZE; i++)
1883
        tlb_update_dirty(&env->tlb_table[2][i]);
1884
#if (NB_MMU_MODES == 4)
1885
    for(i = 0; i < CPU_TLB_SIZE; i++)
1886
        tlb_update_dirty(&env->tlb_table[3][i]);
1887
#endif
1888
#endif
1889
}
1890

    
1891
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1892
{
1893
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1894
        tlb_entry->addr_write = vaddr;
1895
}
1896

    
1897
/* update the TLB corresponding to virtual page vaddr
1898
   so that it is no longer dirty */
1899
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1900
{
1901
    int i;
1902

    
1903
    vaddr &= TARGET_PAGE_MASK;
1904
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1905
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1906
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1907
#if (NB_MMU_MODES >= 3)
1908
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1909
#if (NB_MMU_MODES == 4)
1910
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1911
#endif
1912
#endif
1913
}
1914

    
1915
/* add a new TLB entry. At most one entry for a given virtual address
1916
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1917
   (can only happen in non SOFTMMU mode for I/O pages or pages
1918
   conflicting with the host address space). */
1919
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1920
                      target_phys_addr_t paddr, int prot,
1921
                      int mmu_idx, int is_softmmu)
1922
{
1923
    PhysPageDesc *p;
1924
    unsigned long pd;
1925
    unsigned int index;
1926
    target_ulong address;
1927
    target_ulong code_address;
1928
    target_phys_addr_t addend;
1929
    int ret;
1930
    CPUTLBEntry *te;
1931
    CPUWatchpoint *wp;
1932
    target_phys_addr_t iotlb;
1933

    
1934
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1935
    if (!p) {
1936
        pd = IO_MEM_UNASSIGNED;
1937
    } else {
1938
        pd = p->phys_offset;
1939
    }
1940
#if defined(DEBUG_TLB)
1941
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1942
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1943
#endif
1944

    
1945
    ret = 0;
1946
    address = vaddr;
1947
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1948
        /* IO memory case (romd handled later) */
1949
        address |= TLB_MMIO;
1950
    }
1951
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1952
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1953
        /* Normal RAM.  */
1954
        iotlb = pd & TARGET_PAGE_MASK;
1955
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1956
            iotlb |= IO_MEM_NOTDIRTY;
1957
        else
1958
            iotlb |= IO_MEM_ROM;
1959
    } else {
1960
            /* IO handlers are currently passed a physical address.
1961
           It would be nice to pass an offset from the base address
1962
           of that region.  This would avoid having to special case RAM,
1963
           and avoid full address decoding in every device.
1964
           We can't use the high bits of pd for this because
1965
           IO_MEM_ROMD uses these as a ram address.  */
1966
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1967
    }
1968

    
1969
    code_address = address;
1970
    /* Make accesses to pages with watchpoints go via the
1971
       watchpoint trap routines.  */
1972
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1973
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1974
            iotlb = io_mem_watch + paddr;
1975
            /* TODO: The memory case can be optimized by not trapping
1976
               reads of pages with a write breakpoint.  */
1977
            address |= TLB_MMIO;
1978
        }
1979
    }
1980

    
1981
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1982
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
1983
    te = &env->tlb_table[mmu_idx][index];
1984
    te->addend = addend - vaddr;
1985
    if (prot & PAGE_READ) {
1986
        te->addr_read = address;
1987
    } else {
1988
        te->addr_read = -1;
1989
    }
1990

    
1991
    if (prot & PAGE_EXEC) {
1992
        te->addr_code = code_address;
1993
    } else {
1994
        te->addr_code = -1;
1995
    }
1996
    if (prot & PAGE_WRITE) {
1997
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1998
            (pd & IO_MEM_ROMD)) {
1999
            /* Write access calls the I/O callback.  */
2000
            te->addr_write = address | TLB_MMIO;
2001
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2002
                   !cpu_physical_memory_is_dirty(pd)) {
2003
            te->addr_write = address | TLB_NOTDIRTY;
2004
        } else {
2005
            te->addr_write = address;
2006
        }
2007
    } else {
2008
        te->addr_write = -1;
2009
    }
2010
    return ret;
2011
}
2012
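/* Illustrative caller of tlb_set_page_exec() (a sketch, not from this
   file): once a target's page-table walk has produced a physical address
   and protection bits, its softmmu fault handler installs the mapping.
   'phys', 'prot' and 'mmu_idx' stand for whatever the walker computed.

       ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                               phys & TARGET_PAGE_MASK,
                               prot, mmu_idx, 1);
*/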

    
2013
#else
2014

    
2015
void tlb_flush(CPUState *env, int flush_global)
2016
{
2017
}
2018

    
2019
void tlb_flush_page(CPUState *env, target_ulong addr)
2020
{
2021
}
2022

    
2023
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2024
                      target_phys_addr_t paddr, int prot,
2025
                      int mmu_idx, int is_softmmu)
2026
{
2027
    return 0;
2028
}
2029

    
2030
/* dump memory mappings */
2031
void page_dump(FILE *f)
2032
{
2033
    unsigned long start, end;
2034
    int i, j, prot, prot1;
2035
    PageDesc *p;
2036

    
2037
    fprintf(f, "%-8s %-8s %-8s %s\n",
2038
            "start", "end", "size", "prot");
2039
    start = -1;
2040
    end = -1;
2041
    prot = 0;
2042
    for(i = 0; i <= L1_SIZE; i++) {
2043
        if (i < L1_SIZE)
2044
            p = l1_map[i];
2045
        else
2046
            p = NULL;
2047
        for(j = 0;j < L2_SIZE; j++) {
2048
            if (!p)
2049
                prot1 = 0;
2050
            else
2051
                prot1 = p[j].flags;
2052
            if (prot1 != prot) {
2053
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2054
                if (start != -1) {
2055
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2056
                            start, end, end - start,
2057
                            prot & PAGE_READ ? 'r' : '-',
2058
                            prot & PAGE_WRITE ? 'w' : '-',
2059
                            prot & PAGE_EXEC ? 'x' : '-');
2060
                }
2061
                if (prot1 != 0)
2062
                    start = end;
2063
                else
2064
                    start = -1;
2065
                prot = prot1;
2066
            }
2067
            if (!p)
2068
                break;
2069
        }
2070
    }
2071
}
2072

    
2073
int page_get_flags(target_ulong address)
2074
{
2075
    PageDesc *p;
2076

    
2077
    p = page_find(address >> TARGET_PAGE_BITS);
2078
    if (!p)
2079
        return 0;
2080
    return p->flags;
2081
}
2082

    
2083
/* modify the flags of a page and invalidate the code if
2084
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2085
   depending on PAGE_WRITE */
2086
void page_set_flags(target_ulong start, target_ulong end, int flags)
2087
{
2088
    PageDesc *p;
2089
    target_ulong addr;
2090

    
2091
    /* mmap_lock should already be held.  */
2092
    start = start & TARGET_PAGE_MASK;
2093
    end = TARGET_PAGE_ALIGN(end);
2094
    if (flags & PAGE_WRITE)
2095
        flags |= PAGE_WRITE_ORG;
2096
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2097
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2098
        /* We may be called for host regions that are outside guest
2099
           address space.  */
2100
        if (!p)
2101
            return;
2102
        /* if the write protection is set, then we invalidate the code
2103
           inside */
2104
        if (!(p->flags & PAGE_WRITE) &&
2105
            (flags & PAGE_WRITE) &&
2106
            p->first_tb) {
2107
            tb_invalidate_phys_page(addr, 0, NULL);
2108
        }
2109
        p->flags = flags;
2110
    }
2111
}
2112

    
2113
int page_check_range(target_ulong start, target_ulong len, int flags)
2114
{
2115
    PageDesc *p;
2116
    target_ulong end;
2117
    target_ulong addr;
2118

    
2119
    if (start + len < start)
2120
        /* we've wrapped around */
2121
        return -1;
2122

    
2123
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2124
    start = start & TARGET_PAGE_MASK;
2125

    
2126
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2127
        p = page_find(addr >> TARGET_PAGE_BITS);
2128
        if( !p )
2129
            return -1;
2130
        if( !(p->flags & PAGE_VALID) )
2131
            return -1;
2132

    
2133
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2134
            return -1;
2135
        if (flags & PAGE_WRITE) {
2136
            if (!(p->flags & PAGE_WRITE_ORG))
2137
                return -1;
2138
            /* unprotect the page if it was put read-only because it
2139
               contains translated code */
2140
            if (!(p->flags & PAGE_WRITE)) {
2141
                if (!page_unprotect(addr, 0, NULL))
2142
                    return -1;
2143
            }
2144
            return 0;
2145
        }
2146
    }
2147
    return 0;
2148
}
2149
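/* Illustrative user-mode caller (a sketch, not from this file): syscall
   emulation validates a guest buffer before touching it.  The access_ok()
   style check in the linux-user code is assumed to reduce to something
   like this; -EFAULT is the hypothetical error path.

       if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0)
           return -EFAULT;
*/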

    
2150
/* called from signal handler: invalidate the code and unprotect the
2151
   page. Return TRUE if the fault was successfully handled. */
2152
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2153
{
2154
    unsigned int page_index, prot, pindex;
2155
    PageDesc *p, *p1;
2156
    target_ulong host_start, host_end, addr;
2157

    
2158
    /* Technically this isn't safe inside a signal handler.  However, we
2159
       know this only ever happens in a synchronous SEGV handler, so in
2160
       practice it seems to be ok.  */
2161
    mmap_lock();
2162

    
2163
    host_start = address & qemu_host_page_mask;
2164
    page_index = host_start >> TARGET_PAGE_BITS;
2165
    p1 = page_find(page_index);
2166
    if (!p1) {
2167
        mmap_unlock();
2168
        return 0;
2169
    }
2170
    host_end = host_start + qemu_host_page_size;
2171
    p = p1;
2172
    prot = 0;
2173
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2174
        prot |= p->flags;
2175
        p++;
2176
    }
2177
    /* if the page was really writable, then we change its
2178
       protection back to writable */
2179
    if (prot & PAGE_WRITE_ORG) {
2180
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2181
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2182
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2183
                     (prot & PAGE_BITS) | PAGE_WRITE);
2184
            p1[pindex].flags |= PAGE_WRITE;
2185
            /* and since the content will be modified, we must invalidate
2186
               the corresponding translated code. */
2187
            tb_invalidate_phys_page(address, pc, puc);
2188
#ifdef DEBUG_TB_CHECK
2189
            tb_invalidate_check(address);
2190
#endif
2191
            mmap_unlock();
2192
            return 1;
2193
        }
2194
    }
2195
    mmap_unlock();
2196
    return 0;
2197
}
2198

    
2199
static inline void tlb_set_dirty(CPUState *env,
2200
                                 unsigned long addr, target_ulong vaddr)
2201
{
2202
}
2203
#endif /* defined(CONFIG_USER_ONLY) */
2204

    
2205
#if !defined(CONFIG_USER_ONLY)
2206
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2207
                             ram_addr_t memory);
2208
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2209
                           ram_addr_t orig_memory);
2210
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2211
                      need_subpage)                                     \
2212
    do {                                                                \
2213
        if (addr > start_addr)                                          \
2214
            start_addr2 = 0;                                            \
2215
        else {                                                          \
2216
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2217
            if (start_addr2 > 0)                                        \
2218
                need_subpage = 1;                                       \
2219
        }                                                               \
2220
                                                                        \
2221
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2222
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2223
        else {                                                          \
2224
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2225
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2226
                need_subpage = 1;                                       \
2227
        }                                                               \
2228
    } while (0)
2229
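/* Worked example for CHECK_SUBPAGE (illustrative, assuming a 4 KiB target
   page): registering a region with start_addr = 0x1000200 and
   orig_size = 0x100, and looking at the page addr = 0x1000000, yields
   start_addr2 = 0x200 and end_addr2 = 0x2ff, so need_subpage is set and
   only that slice of the page is routed to the new handlers. */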

    
2230
/* register physical memory. 'size' must be a multiple of the target
2231
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2232
   io memory page */
2233
void cpu_register_physical_memory(target_phys_addr_t start_addr,
2234
                                  ram_addr_t size,
2235
                                  ram_addr_t phys_offset)
2236
{
2237
    target_phys_addr_t addr, end_addr;
2238
    PhysPageDesc *p;
2239
    CPUState *env;
2240
    ram_addr_t orig_size = size;
2241
    void *subpage;
2242

    
2243
#ifdef USE_KQEMU
2244
    /* XXX: should not depend on cpu context */
2245
    env = first_cpu;
2246
    if (env->kqemu_enabled) {
2247
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2248
    }
2249
#endif
2250
    if (kvm_enabled())
2251
        kvm_set_phys_mem(start_addr, size, phys_offset);
2252

    
2253
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2254
    end_addr = start_addr + (target_phys_addr_t)size;
2255
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2256
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2257
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2258
            ram_addr_t orig_memory = p->phys_offset;
2259
            target_phys_addr_t start_addr2, end_addr2;
2260
            int need_subpage = 0;
2261

    
2262
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2263
                          need_subpage);
2264
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2265
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2266
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2267
                                           &p->phys_offset, orig_memory);
2268
                } else {
2269
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2270
                                            >> IO_MEM_SHIFT];
2271
                }
2272
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2273
            } else {
2274
                p->phys_offset = phys_offset;
2275
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2276
                    (phys_offset & IO_MEM_ROMD))
2277
                    phys_offset += TARGET_PAGE_SIZE;
2278
            }
2279
        } else {
2280
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2281
            p->phys_offset = phys_offset;
2282
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2283
                (phys_offset & IO_MEM_ROMD))
2284
                phys_offset += TARGET_PAGE_SIZE;
2285
            else {
2286
                target_phys_addr_t start_addr2, end_addr2;
2287
                int need_subpage = 0;
2288

    
2289
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2290
                              end_addr2, need_subpage);
2291

    
2292
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2293
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2294
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2295
                    subpage_register(subpage, start_addr2, end_addr2,
2296
                                     phys_offset);
2297
                }
2298
            }
2299
        }
2300
    }
2301

    
2302
    /* since each CPU stores ram addresses in its TLB cache, we must
2303
       reset the modified entries */
2304
    /* XXX: slow ! */
2305
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2306
        tlb_flush(env, 1);
2307
    }
2308
}
2309

    
2310
/* XXX: temporary until new memory mapping API */
2311
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2312
{
2313
    PhysPageDesc *p;
2314

    
2315
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2316
    if (!p)
2317
        return IO_MEM_UNASSIGNED;
2318
    return p->phys_offset;
2319
}
2320

    
2321
/* XXX: better than nothing */
2322
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2323
{
2324
    ram_addr_t addr;
2325
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2326
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2327
                (uint64_t)size, (uint64_t)phys_ram_size);
2328
        abort();
2329
    }
2330
    addr = phys_ram_alloc_offset;
2331
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2332
    return addr;
2333
}
2334
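/* Typical board-code pattern (an illustrative sketch, not part of this
   file): carve guest RAM out of the preallocated pool and map it at a
   guest physical address.  'ram_size' and the base address 0x00000000 are
   placeholders for whatever the machine init code chose.

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);
*/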

    
2335
void qemu_ram_free(ram_addr_t addr)
2336
{
2337
}
2338

    
2339
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2340
{
2341
#ifdef DEBUG_UNASSIGNED
2342
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2343
#endif
2344
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2345
    do_unassigned_access(addr, 0, 0, 0, 1);
2346
#endif
2347
    return 0;
2348
}
2349

    
2350
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2351
{
2352
#ifdef DEBUG_UNASSIGNED
2353
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2354
#endif
2355
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2356
    do_unassigned_access(addr, 0, 0, 0, 2);
2357
#endif
2358
    return 0;
2359
}
2360

    
2361
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2362
{
2363
#ifdef DEBUG_UNASSIGNED
2364
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2365
#endif
2366
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2367
    do_unassigned_access(addr, 0, 0, 0, 4);
2368
#endif
2369
    return 0;
2370
}
2371

    
2372
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2373
{
2374
#ifdef DEBUG_UNASSIGNED
2375
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2376
#endif
2377
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2378
    do_unassigned_access(addr, 1, 0, 0, 1);
2379
#endif
2380
}
2381

    
2382
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2383
{
2384
#ifdef DEBUG_UNASSIGNED
2385
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2386
#endif
2387
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2388
    do_unassigned_access(addr, 1, 0, 0, 2);
2389
#endif
2390
}
2391

    
2392
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2393
{
2394
#ifdef DEBUG_UNASSIGNED
2395
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2396
#endif
2397
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2398
    do_unassigned_access(addr, 1, 0, 0, 4);
2399
#endif
2400
}
2401

    
2402
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2403
    unassigned_mem_readb,
2404
    unassigned_mem_readw,
2405
    unassigned_mem_readl,
2406
};
2407

    
2408
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2409
    unassigned_mem_writeb,
2410
    unassigned_mem_writew,
2411
    unassigned_mem_writel,
2412
};
2413

    
2414
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2415
                                uint32_t val)
2416
{
2417
    int dirty_flags;
2418
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2419
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2420
#if !defined(CONFIG_USER_ONLY)
2421
        tb_invalidate_phys_page_fast(ram_addr, 1);
2422
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2423
#endif
2424
    }
2425
    stb_p(phys_ram_base + ram_addr, val);
2426
#ifdef USE_KQEMU
2427
    if (cpu_single_env->kqemu_enabled &&
2428
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2429
        kqemu_modify_page(cpu_single_env, ram_addr);
2430
#endif
2431
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2432
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2433
    /* we remove the notdirty callback only if the code has been
2434
       flushed */
2435
    if (dirty_flags == 0xff)
2436
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2437
}
2438

    
2439
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2440
                                uint32_t val)
2441
{
2442
    int dirty_flags;
2443
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2444
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2445
#if !defined(CONFIG_USER_ONLY)
2446
        tb_invalidate_phys_page_fast(ram_addr, 2);
2447
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2448
#endif
2449
    }
2450
    stw_p(phys_ram_base + ram_addr, val);
2451
#ifdef USE_KQEMU
2452
    if (cpu_single_env->kqemu_enabled &&
2453
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2454
        kqemu_modify_page(cpu_single_env, ram_addr);
2455
#endif
2456
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2457
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2458
    /* we remove the notdirty callback only if the code has been
2459
       flushed */
2460
    if (dirty_flags == 0xff)
2461
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2462
}
2463

    
2464
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2465
                                uint32_t val)
2466
{
2467
    int dirty_flags;
2468
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2469
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2470
#if !defined(CONFIG_USER_ONLY)
2471
        tb_invalidate_phys_page_fast(ram_addr, 4);
2472
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2473
#endif
2474
    }
2475
    stl_p(phys_ram_base + ram_addr, val);
2476
#ifdef USE_KQEMU
2477
    if (cpu_single_env->kqemu_enabled &&
2478
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2479
        kqemu_modify_page(cpu_single_env, ram_addr);
2480
#endif
2481
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2482
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2483
    /* we remove the notdirty callback only if the code has been
2484
       flushed */
2485
    if (dirty_flags == 0xff)
2486
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2487
}
2488

    
2489
static CPUReadMemoryFunc *error_mem_read[3] = {
2490
    NULL, /* never used */
2491
    NULL, /* never used */
2492
    NULL, /* never used */
2493
};
2494

    
2495
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2496
    notdirty_mem_writeb,
2497
    notdirty_mem_writew,
2498
    notdirty_mem_writel,
2499
};
2500

    
2501
/* Generate a debug exception if a watchpoint has been hit.  */
2502
static void check_watchpoint(int offset, int len_mask, int flags)
2503
{
2504
    CPUState *env = cpu_single_env;
2505
    target_ulong pc, cs_base;
2506
    TranslationBlock *tb;
2507
    target_ulong vaddr;
2508
    CPUWatchpoint *wp;
2509
    int cpu_flags;
2510

    
2511
    if (env->watchpoint_hit) {
2512
        /* We re-entered the check after replacing the TB. Now raise
2513
         * the debug interrupt so that it will trigger after the
2514
         * current instruction. */
2515
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2516
        return;
2517
    }
2518
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2519
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2520
        if ((vaddr == (wp->vaddr & len_mask) ||
2521
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2522
            wp->flags |= BP_WATCHPOINT_HIT;
2523
            if (!env->watchpoint_hit) {
2524
                env->watchpoint_hit = wp;
2525
                tb = tb_find_pc(env->mem_io_pc);
2526
                if (!tb) {
2527
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2528
                              "pc=%p", (void *)env->mem_io_pc);
2529
                }
2530
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2531
                tb_phys_invalidate(tb, -1);
2532
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2533
                    env->exception_index = EXCP_DEBUG;
2534
                } else {
2535
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2536
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2537
                }
2538
                cpu_resume_from_signal(env, NULL);
2539
            }
2540
        } else {
2541
            wp->flags &= ~BP_WATCHPOINT_HIT;
2542
        }
2543
    }
2544
}
2545

    
2546
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2547
   so these check for a hit then pass through to the normal out-of-line
2548
   phys routines.  */
2549
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2550
{
2551
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2552
    return ldub_phys(addr);
2553
}
2554

    
2555
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2556
{
2557
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2558
    return lduw_phys(addr);
2559
}
2560

    
2561
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2562
{
2563
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2564
    return ldl_phys(addr);
2565
}
2566

    
2567
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2568
                             uint32_t val)
2569
{
2570
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2571
    stb_phys(addr, val);
2572
}
2573

    
2574
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2575
                             uint32_t val)
2576
{
2577
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2578
    stw_phys(addr, val);
2579
}
2580

    
2581
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2582
                             uint32_t val)
2583
{
2584
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2585
    stl_phys(addr, val);
2586
}
2587

    
2588
static CPUReadMemoryFunc *watch_mem_read[3] = {
2589
    watch_mem_readb,
2590
    watch_mem_readw,
2591
    watch_mem_readl,
2592
};
2593

    
2594
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2595
    watch_mem_writeb,
2596
    watch_mem_writew,
2597
    watch_mem_writel,
2598
};
2599
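/* Illustrative setup (a sketch, not from this file): once a debugger front
   end inserts a data watchpoint, the TLB entry covering that page is
   flagged TLB_MMIO, so loads and stores funnel through the watch_mem_*
   handlers above.  cpu_watchpoint_insert() is assumed to be the companion
   API defined earlier in this file.

       CPUWatchpoint *wp;
       cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
*/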

    
2600
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2601
                                 unsigned int len)
2602
{
2603
    uint32_t ret;
2604
    unsigned int idx;
2605

    
2606
    idx = SUBPAGE_IDX(addr - mmio->base);
2607
#if defined(DEBUG_SUBPAGE)
2608
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2609
           mmio, len, addr, idx);
2610
#endif
2611
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2612

    
2613
    return ret;
2614
}
2615

    
2616
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2617
                              uint32_t value, unsigned int len)
2618
{
2619
    unsigned int idx;
2620

    
2621
    idx = SUBPAGE_IDX(addr - mmio->base);
2622
#if defined(DEBUG_SUBPAGE)
2623
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2624
           mmio, len, addr, idx, value);
2625
#endif
2626
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2627
}
2628

    
2629
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2630
{
2631
#if defined(DEBUG_SUBPAGE)
2632
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2633
#endif
2634

    
2635
    return subpage_readlen(opaque, addr, 0);
2636
}
2637

    
2638
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2639
                            uint32_t value)
2640
{
2641
#if defined(DEBUG_SUBPAGE)
2642
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2643
#endif
2644
    subpage_writelen(opaque, addr, value, 0);
2645
}
2646

    
2647
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2648
{
2649
#if defined(DEBUG_SUBPAGE)
2650
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2651
#endif
2652

    
2653
    return subpage_readlen(opaque, addr, 1);
2654
}
2655

    
2656
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2657
                            uint32_t value)
2658
{
2659
#if defined(DEBUG_SUBPAGE)
2660
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2661
#endif
2662
    subpage_writelen(opaque, addr, value, 1);
2663
}
2664

    
2665
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2666
{
2667
#if defined(DEBUG_SUBPAGE)
2668
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2669
#endif
2670

    
2671
    return subpage_readlen(opaque, addr, 2);
2672
}
2673

    
2674
static void subpage_writel (void *opaque,
2675
                         target_phys_addr_t addr, uint32_t value)
2676
{
2677
#if defined(DEBUG_SUBPAGE)
2678
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2679
#endif
2680
    subpage_writelen(opaque, addr, value, 2);
2681
}
2682

    
2683
static CPUReadMemoryFunc *subpage_read[] = {
2684
    &subpage_readb,
2685
    &subpage_readw,
2686
    &subpage_readl,
2687
};
2688

    
2689
static CPUWriteMemoryFunc *subpage_write[] = {
2690
    &subpage_writeb,
2691
    &subpage_writew,
2692
    &subpage_writel,
2693
};
2694

    
2695
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2696
                             ram_addr_t memory)
2697
{
2698
    int idx, eidx;
2699
    unsigned int i;
2700

    
2701
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2702
        return -1;
2703
    idx = SUBPAGE_IDX(start);
2704
    eidx = SUBPAGE_IDX(end);
2705
#if defined(DEBUG_SUBPAGE)
2706
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2707
           mmio, start, end, idx, eidx, memory);
2708
#endif
2709
    memory >>= IO_MEM_SHIFT;
2710
    for (; idx <= eidx; idx++) {
2711
        for (i = 0; i < 4; i++) {
2712
            if (io_mem_read[memory][i]) {
2713
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2714
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2715
            }
2716
            if (io_mem_write[memory][i]) {
2717
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2718
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2719
            }
2720
        }
2721
    }
2722

    
2723
    return 0;
2724
}
2725

    
2726
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2727
                           ram_addr_t orig_memory)
2728
{
2729
    subpage_t *mmio;
2730
    int subpage_memory;
2731

    
2732
    mmio = qemu_mallocz(sizeof(subpage_t));
2733
    if (mmio != NULL) {
2734
        mmio->base = base;
2735
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2736
#if defined(DEBUG_SUBPAGE)
2737
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2738
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2739
#endif
2740
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2741
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2742
    }
2743

    
2744
    return mmio;
2745
}
2746

    
2747
static void io_mem_init(void)
2748
{
2749
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2750
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2751
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2752
    io_mem_nb = 5;
2753

    
2754
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2755
                                          watch_mem_write, NULL);
2756
    /* alloc dirty bits array */
2757
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2758
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2759
}
2760

    
2761
/* mem_read and mem_write are arrays of functions containing the
2762
   function to access byte (index 0), word (index 1) and dword (index
2763
   2). Functions can be omitted with a NULL function pointer. The
2764
   registered functions may be modified dynamically later.
2765
   If io_index is non-zero, the corresponding io zone is
2766
   modified. If it is zero, a new io zone is allocated. The return
2767
   value can be used with cpu_register_physical_memory(); -1 is
2768
   returned on error. */
2769
int cpu_register_io_memory(int io_index,
2770
                           CPUReadMemoryFunc **mem_read,
2771
                           CPUWriteMemoryFunc **mem_write,
2772
                           void *opaque)
2773
{
2774
    int i, subwidth = 0;
2775

    
2776
    if (io_index <= 0) {
2777
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2778
            return -1;
2779
        io_index = io_mem_nb++;
2780
    } else {
2781
        if (io_index >= IO_MEM_NB_ENTRIES)
2782
            return -1;
2783
    }
2784

    
2785
    for(i = 0;i < 3; i++) {
2786
        if (!mem_read[i] || !mem_write[i])
2787
            subwidth = IO_MEM_SUBWIDTH;
2788
        io_mem_read[io_index][i] = mem_read[i];
2789
        io_mem_write[io_index][i] = mem_write[i];
2790
    }
2791
    io_mem_opaque[io_index] = opaque;
2792
    return (io_index << IO_MEM_SHIFT) | subwidth;
2793
}
2794
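/* Illustrative device registration (a sketch, not from this file): a
   device supplies byte/word/long accessors, registers them, and maps the
   returned index over a page of guest physical address space.  The foo_*
   functions, 'opaque' and FOO_BASE are hypothetical.

       static CPUReadMemoryFunc *foo_read[3] = {
           foo_readb, foo_readw, foo_readl,
       };
       static CPUWriteMemoryFunc *foo_write[3] = {
           foo_writeb, foo_writew, foo_writel,
       };

       int io = cpu_register_io_memory(0, foo_read, foo_write, opaque);
       cpu_register_physical_memory(FOO_BASE, TARGET_PAGE_SIZE, io);
*/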

    
2795
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2796
{
2797
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2798
}
2799

    
2800
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2801
{
2802
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2803
}
2804

    
2805
#endif /* !defined(CONFIG_USER_ONLY) */
2806

    
2807
/* physical memory access (slow version, mainly for debug) */
2808
#if defined(CONFIG_USER_ONLY)
2809
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2810
                            int len, int is_write)
2811
{
2812
    int l, flags;
2813
    target_ulong page;
2814
    void * p;
2815

    
2816
    while (len > 0) {
2817
        page = addr & TARGET_PAGE_MASK;
2818
        l = (page + TARGET_PAGE_SIZE) - addr;
2819
        if (l > len)
2820
            l = len;
2821
        flags = page_get_flags(page);
2822
        if (!(flags & PAGE_VALID))
2823
            return;
2824
        if (is_write) {
2825
            if (!(flags & PAGE_WRITE))
2826
                return;
2827
            /* XXX: this code should not depend on lock_user */
2828
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2829
                /* FIXME - should this return an error rather than just fail? */
2830
                return;
2831
            memcpy(p, buf, l);
2832
            unlock_user(p, addr, l);
2833
        } else {
2834
            if (!(flags & PAGE_READ))
2835
                return;
2836
            /* XXX: this code should not depend on lock_user */
2837
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2838
                /* FIXME - should this return an error rather than just fail? */
2839
                return;
2840
            memcpy(buf, p, l);
2841
            unlock_user(p, addr, 0);
2842
        }
2843
        len -= l;
2844
        buf += l;
2845
        addr += l;
2846
    }
2847
}
2848

    
2849
#else
2850
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2851
                            int len, int is_write)
2852
{
2853
    int l, io_index;
2854
    uint8_t *ptr;
2855
    uint32_t val;
2856
    target_phys_addr_t page;
2857
    unsigned long pd;
2858
    PhysPageDesc *p;
2859

    
2860
    while (len > 0) {
2861
        page = addr & TARGET_PAGE_MASK;
2862
        l = (page + TARGET_PAGE_SIZE) - addr;
2863
        if (l > len)
2864
            l = len;
2865
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2866
        if (!p) {
2867
            pd = IO_MEM_UNASSIGNED;
2868
        } else {
2869
            pd = p->phys_offset;
2870
        }
2871

    
2872
        if (is_write) {
2873
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2874
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2875
                /* XXX: could force cpu_single_env to NULL to avoid
2876
                   potential bugs */
2877
                if (l >= 4 && ((addr & 3) == 0)) {
2878
                    /* 32 bit write access */
2879
                    val = ldl_p(buf);
2880
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2881
                    l = 4;
2882
                } else if (l >= 2 && ((addr & 1) == 0)) {
2883
                    /* 16 bit write access */
2884
                    val = lduw_p(buf);
2885
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2886
                    l = 2;
2887
                } else {
2888
                    /* 8 bit write access */
2889
                    val = ldub_p(buf);
2890
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2891
                    l = 1;
2892
                }
2893
            } else {
2894
                unsigned long addr1;
2895
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2896
                /* RAM case */
2897
                ptr = phys_ram_base + addr1;
2898
                memcpy(ptr, buf, l);
2899
                if (!cpu_physical_memory_is_dirty(addr1)) {
2900
                    /* invalidate code */
2901
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2902
                    /* set dirty bit */
2903
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2904
                        (0xff & ~CODE_DIRTY_FLAG);
2905
                }
2906
            }
2907
        } else {
2908
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2909
                !(pd & IO_MEM_ROMD)) {
2910
                /* I/O case */
2911
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2912
                if (l >= 4 && ((addr & 3) == 0)) {
2913
                    /* 32 bit read access */
2914
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2915
                    stl_p(buf, val);
2916
                    l = 4;
2917
                } else if (l >= 2 && ((addr & 1) == 0)) {
2918
                    /* 16 bit read access */
2919
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2920
                    stw_p(buf, val);
2921
                    l = 2;
2922
                } else {
2923
                    /* 8 bit read access */
2924
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2925
                    stb_p(buf, val);
2926
                    l = 1;
2927
                }
2928
            } else {
2929
                /* RAM case */
2930
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2931
                    (addr & ~TARGET_PAGE_MASK);
2932
                memcpy(buf, ptr, l);
2933
            }
2934
        }
2935
        len -= l;
2936
        buf += l;
2937
        addr += l;
2938
    }
2939
}
2940
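/* Illustrative DMA-style use (a sketch, not from this file): device models
   normally go through the cpu_physical_memory_read()/-write() wrappers,
   which are assumed to be thin macros around this function.  'desc_addr'
   and 'status_addr' are hypothetical guest physical addresses.

       uint8_t desc[16];
       cpu_physical_memory_read(desc_addr, desc, sizeof(desc));
       ...
       cpu_physical_memory_write(status_addr, &status, 1);
*/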

    
2941
/* used for ROM loading : can write in RAM and ROM */
2942
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2943
                                   const uint8_t *buf, int len)
2944
{
2945
    int l;
2946
    uint8_t *ptr;
2947
    target_phys_addr_t page;
2948
    unsigned long pd;
2949
    PhysPageDesc *p;
2950

    
2951
    while (len > 0) {
2952
        page = addr & TARGET_PAGE_MASK;
2953
        l = (page + TARGET_PAGE_SIZE) - addr;
2954
        if (l > len)
2955
            l = len;
2956
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2957
        if (!p) {
2958
            pd = IO_MEM_UNASSIGNED;
2959
        } else {
2960
            pd = p->phys_offset;
2961
        }
2962

    
2963
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2964
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2965
            !(pd & IO_MEM_ROMD)) {
2966
            /* do nothing */
2967
        } else {
2968
            unsigned long addr1;
2969
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2970
            /* ROM/RAM case */
2971
            ptr = phys_ram_base + addr1;
2972
            memcpy(ptr, buf, l);
2973
        }
2974
        len -= l;
2975
        buf += l;
2976
        addr += l;
2977
    }
2978
}
2979

    
2980

    
2981
/* warning: addr must be aligned */
2982
uint32_t ldl_phys(target_phys_addr_t addr)
2983
{
2984
    int io_index;
2985
    uint8_t *ptr;
2986
    uint32_t val;
2987
    unsigned long pd;
2988
    PhysPageDesc *p;
2989

    
2990
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2991
    if (!p) {
2992
        pd = IO_MEM_UNASSIGNED;
2993
    } else {
2994
        pd = p->phys_offset;
2995
    }
2996

    
2997
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2998
        !(pd & IO_MEM_ROMD)) {
2999
        /* I/O case */
3000
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3001
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3002
    } else {
3003
        /* RAM case */
3004
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3005
            (addr & ~TARGET_PAGE_MASK);
3006
        val = ldl_p(ptr);
3007
    }
3008
    return val;
3009
}
3010

    
3011
/* warning: addr must be aligned */
3012
uint64_t ldq_phys(target_phys_addr_t addr)
3013
{
3014
    int io_index;
3015
    uint8_t *ptr;
3016
    uint64_t val;
3017
    unsigned long pd;
3018
    PhysPageDesc *p;
3019

    
3020
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3021
    if (!p) {
3022
        pd = IO_MEM_UNASSIGNED;
3023
    } else {
3024
        pd = p->phys_offset;
3025
    }
3026

    
3027
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3028
        !(pd & IO_MEM_ROMD)) {
3029
        /* I/O case */
3030
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3031
#ifdef TARGET_WORDS_BIGENDIAN
3032
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3033
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3034
#else
3035
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3036
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3037
#endif
3038
    } else {
3039
        /* RAM case */
3040
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3041
            (addr & ~TARGET_PAGE_MASK);
3042
        val = ldq_p(ptr);
3043
    }
3044
    return val;
3045
}
3046

    
3047
/* XXX: optimize */
3048
uint32_t ldub_phys(target_phys_addr_t addr)
3049
{
3050
    uint8_t val;
3051
    cpu_physical_memory_read(addr, &val, 1);
3052
    return val;
3053
}
3054

    
3055
/* XXX: optimize */
3056
uint32_t lduw_phys(target_phys_addr_t addr)
3057
{
3058
    uint16_t val;
3059
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3060
    return tswap16(val);
3061
}
3062

    
3063
/* warning: addr must be aligned. The ram page is not marked as dirty
3064
   and the code inside is not invalidated. It is useful if the dirty
3065
   bits are used to track modified PTEs */
3066
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3067
{
3068
    int io_index;
3069
    uint8_t *ptr;
3070
    unsigned long pd;
3071
    PhysPageDesc *p;
3072

    
3073
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3074
    if (!p) {
3075
        pd = IO_MEM_UNASSIGNED;
3076
    } else {
3077
        pd = p->phys_offset;
3078
    }
3079

    
3080
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3081
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3082
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3083
    } else {
3084
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3085
        ptr = phys_ram_base + addr1;
3086
        stl_p(ptr, val);
3087

    
3088
        if (unlikely(in_migration)) {
3089
            if (!cpu_physical_memory_is_dirty(addr1)) {
3090
                /* invalidate code */
3091
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3092
                /* set dirty bit */
3093
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3094
                    (0xff & ~CODE_DIRTY_FLAG);
3095
            }
3096
        }
3097
    }
3098
}
3099
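/* Illustrative caller (a sketch, not from this file): target MMU helpers
   use the *_notdirty variants when setting accessed/dirty bits inside a
   guest page table entry, so the write neither marks the page dirty nor
   invalidates translated code.  'pte_addr', 'pte' and the accessed bit are
   target-specific placeholders.

       pte |= PG_ACCESSED_MASK;   /* i386-style accessed bit, as an example */
       stl_phys_notdirty(pte_addr, pte);
*/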

    
3100
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3101
{
3102
    int io_index;
3103
    uint8_t *ptr;
3104
    unsigned long pd;
3105
    PhysPageDesc *p;
3106

    
3107
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3108
    if (!p) {
3109
        pd = IO_MEM_UNASSIGNED;
3110
    } else {
3111
        pd = p->phys_offset;
3112
    }
3113

    
3114
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3115
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3116
#ifdef TARGET_WORDS_BIGENDIAN
3117
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3118
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3119
#else
3120
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3121
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3122
#endif
3123
    } else {
3124
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3125
            (addr & ~TARGET_PAGE_MASK);
3126
        stq_p(ptr, val);
3127
    }
3128
}
3129

    
3130
/* warning: addr must be aligned */
3131
void stl_phys(target_phys_addr_t addr, uint32_t val)
3132
{
3133
    int io_index;
3134
    uint8_t *ptr;
3135
    unsigned long pd;
3136
    PhysPageDesc *p;
3137

    
3138
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3139
    if (!p) {
3140
        pd = IO_MEM_UNASSIGNED;
3141
    } else {
3142
        pd = p->phys_offset;
3143
    }
3144

    
3145
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3146
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3147
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3148
    } else {
3149
        unsigned long addr1;
3150
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3151
        /* RAM case */
3152
        ptr = phys_ram_base + addr1;
3153
        stl_p(ptr, val);
3154
        if (!cpu_physical_memory_is_dirty(addr1)) {
3155
            /* invalidate code */
3156
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3157
            /* set dirty bit */
3158
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3159
                (0xff & ~CODE_DIRTY_FLAG);
3160
        }
3161
    }
3162
}
3163

    
3164
/* XXX: optimize */
3165
void stb_phys(target_phys_addr_t addr, uint32_t val)
3166
{
3167
    uint8_t v = val;
3168
    cpu_physical_memory_write(addr, &v, 1);
3169
}
3170

    
3171
/* XXX: optimize */
3172
void stw_phys(target_phys_addr_t addr, uint32_t val)
3173
{
3174
    uint16_t v = tswap16(val);
3175
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3176
}
3177

    
3178
/* XXX: optimize */
3179
void stq_phys(target_phys_addr_t addr, uint64_t val)
3180
{
3181
    val = tswap64(val);
3182
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3183
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
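
/*
 * Example (a sketch, not a call made in this file): a debugger front end
 * such as the gdb stub reads guest virtual memory page by page through
 * this helper, translating each page with cpu_get_phys_page_debug():
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... the virtual address is not mapped ...
 *     }
 *
 * vaddr is a hypothetical guest virtual address used only for this example.
 */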

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
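/* In outline: when an I/O access is hit in the middle of a TB while
   instruction counting is active, the TB is regenerated with CF_LAST_IO so
   that the I/O instruction becomes its last instruction, and execution is
   restarted from that instruction. */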
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}
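
/* Dump translation-cache statistics (TB counts and sizes, direct-jump
   chaining, flush counters); typically reached through the monitor's
   "info jit" command. */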
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
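
/* Each inclusion of softmmu_template.h below expands the slow-path code
   fetch helpers for one access size; SHIFT is log2 of that size (0 = byte,
   1 = word, 2 = long, 3 = quad), and MMUSUFFIX/SOFTMMU_CODE_ACCESS above
   select the _cmmu code-access flavour. */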

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif