1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#include "hw/hw.h"
41
#include "osdep.h"
42
#if defined(CONFIG_USER_ONLY)
43
#include <qemu.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#define MMAP_AREA_START        0x00000000
66
#define MMAP_AREA_END          0xa8000000
67

    
68
#if defined(TARGET_SPARC64)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 41
70
#elif defined(TARGET_SPARC)
71
#define TARGET_PHYS_ADDR_SPACE_BITS 36
72
#elif defined(TARGET_ALPHA)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#define TARGET_VIRT_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_PPC64)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 42
77
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
78
#define TARGET_PHYS_ADDR_SPACE_BITS 42
79
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
80
#define TARGET_PHYS_ADDR_SPACE_BITS 36
81
#else
82
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
83
#define TARGET_PHYS_ADDR_SPACE_BITS 32
84
#endif
85

    
86
static TranslationBlock *tbs;
87
int code_gen_max_blocks;
88
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
89
static int nb_tbs;
90
/* any access to the tbs or the page table must use this lock */
91
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
92

    
93
#if defined(__arm__) || defined(__sparc_v9__)
94
/* The prologue must be reachable with a direct jump. ARM and Sparc64
95
 have limited branch ranges (possibly also PPC) so place it in a
96
 section close to the code segment. */
97
#define code_gen_section                                \
98
    __attribute__((__section__(".gen_code")))           \
99
    __attribute__((aligned (32)))
100
#else
101
#define code_gen_section                                \
102
    __attribute__((aligned (32)))
103
#endif
104

    
105
uint8_t code_gen_prologue[1024] code_gen_section;
106
static uint8_t *code_gen_buffer;
107
static unsigned long code_gen_buffer_size;
108
/* threshold to flush the translated code buffer */
109
static unsigned long code_gen_buffer_max_size;
110
uint8_t *code_gen_ptr;
111

    
112
#if !defined(CONFIG_USER_ONLY)
113
ram_addr_t phys_ram_size;
114
int phys_ram_fd;
115
uint8_t *phys_ram_base;
116
uint8_t *phys_ram_dirty;
117
static int in_migration;
118
static ram_addr_t phys_ram_alloc_offset = 0;
119
#endif
120

    
121
CPUState *first_cpu;
122
/* current CPU in the current thread. It is only valid inside
123
   cpu_exec() */
124
CPUState *cpu_single_env;
125
/* 0 = Do not count executed instructions.
126
   1 = Precise instruction counting.
127
   2 = Adaptive rate instruction counting.  */
128
int use_icount = 0;
129
/* Current instruction counter.  While executing translated code this may
130
   include some instructions that have not yet been executed.  */
131
int64_t qemu_icount;
132

    
133
typedef struct PageDesc {
134
    /* list of TBs intersecting this ram page */
135
    TranslationBlock *first_tb;
136
    /* in order to optimize self-modifying code, we count the number
137
       of code writes we see on a given page, to decide when to use a bitmap */
138
    unsigned int code_write_count;
139
    uint8_t *code_bitmap;
140
#if defined(CONFIG_USER_ONLY)
141
    unsigned long flags;
142
#endif
143
} PageDesc;
144

    
145
typedef struct PhysPageDesc {
146
    /* offset in host memory of the page + io_index in the low bits */
147
    ram_addr_t phys_offset;
148
} PhysPageDesc;
149

    
150
#define L2_BITS 10
151
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
152
/* XXX: this is a temporary hack for alpha target.
153
 *      In the future, this is to be replaced by a multi-level table
154
 *      to actually be able to handle the complete 64-bit address space.
155
 */
156
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
157
#else
158
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
159
#endif
160

    
161
#define L1_SIZE (1 << L1_BITS)
162
#define L2_SIZE (1 << L2_BITS)
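/* Illustrative note (not part of the original source): page_find_alloc()
   below splits a target page index (virtual address >> TARGET_PAGE_BITS)
   into an L1 slot (index >> L2_BITS) and an L2 slot (index & (L2_SIZE - 1)).
   Assuming a 32-bit target with 4 KB pages (TARGET_PAGE_BITS == 12), both
   levels are 10 bits wide, so L1_SIZE == L2_SIZE == 1024.  For example:

       target_ulong addr  = 0xabcd1234;
       target_ulong index = addr >> 12;             // 0xabcd1
       PageDesc **l1      = &l1_map[index >> 10];   // L1 slot 0x2af
       PageDesc  *pd      = *l1 + (index & 0x3ff);  // L2 slot 0x0d1
*/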
163

    
164
unsigned long qemu_real_host_page_size;
165
unsigned long qemu_host_page_bits;
166
unsigned long qemu_host_page_size;
167
unsigned long qemu_host_page_mask;
168

    
169
/* XXX: for system emulation, it could just be an array */
170
static PageDesc *l1_map[L1_SIZE];
171
static PhysPageDesc **l1_phys_map;
172

    
173
#if !defined(CONFIG_USER_ONLY)
174
static void io_mem_init(void);
175

    
176
/* io memory support */
177
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
178
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
179
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
180
static int io_mem_nb;
181
static int io_mem_watch;
182
#endif
183

    
184
/* log support */
185
static const char *logfilename = "/tmp/qemu.log";
186
FILE *logfile;
187
int loglevel;
188
static int log_append = 0;
189

    
190
/* statistics */
191
static int tlb_flush_count;
192
static int tb_flush_count;
193
static int tb_phys_invalidate_count;
194

    
195
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
196
typedef struct subpage_t {
197
    target_phys_addr_t base;
198
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
199
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
200
    void *opaque[TARGET_PAGE_SIZE][2][4];
201
} subpage_t;
202

    
203
#ifdef _WIN32
204
static void map_exec(void *addr, long size)
205
{
206
    DWORD old_protect;
207
    VirtualProtect(addr, size,
208
                   PAGE_EXECUTE_READWRITE, &old_protect);
209
    
210
}
211
#else
212
static void map_exec(void *addr, long size)
213
{
214
    unsigned long start, end, page_size;
215
    
216
    page_size = getpagesize();
217
    start = (unsigned long)addr;
218
    start &= ~(page_size - 1);
219
    
220
    end = (unsigned long)addr + size;
221
    end += page_size - 1;
222
    end &= ~(page_size - 1);
223
    
224
    mprotect((void *)start, end - start,
225
             PROT_READ | PROT_WRITE | PROT_EXEC);
226
}
227
#endif
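/* Illustrative note (not in the original source): the POSIX map_exec()
   above rounds the requested range outward to host page boundaries before
   calling mprotect().  With a 4096-byte host page size, for example:

       addr  = 0x401234, size = 0x100
       start = 0x401234 & ~0xfff           = 0x401000
       end   = (0x401334 + 0xfff) & ~0xfff = 0x402000

   so every host page touched by the buffer becomes executable. */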
228

    
229
static void page_init(void)
230
{
231
    /* NOTE: we can always assume that qemu_host_page_size >=
232
       TARGET_PAGE_SIZE */
233
#ifdef _WIN32
234
    {
235
        SYSTEM_INFO system_info;
236
        DWORD old_protect;
237

    
238
        GetSystemInfo(&system_info);
239
        qemu_real_host_page_size = system_info.dwPageSize;
240
    }
241
#else
242
    qemu_real_host_page_size = getpagesize();
243
#endif
244
    if (qemu_host_page_size == 0)
245
        qemu_host_page_size = qemu_real_host_page_size;
246
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
247
        qemu_host_page_size = TARGET_PAGE_SIZE;
248
    qemu_host_page_bits = 0;
249
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
250
        qemu_host_page_bits++;
251
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
252
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
253
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
254

    
255
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
256
    {
257
        long long startaddr, endaddr;
258
        FILE *f;
259
        int n;
260

    
261
        mmap_lock();
262
        last_brk = (unsigned long)sbrk(0);
263
        f = fopen("/proc/self/maps", "r");
264
        if (f) {
265
            do {
266
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
267
                if (n == 2) {
268
                    startaddr = MIN(startaddr,
269
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270
                    endaddr = MIN(endaddr,
271
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
273
                                   TARGET_PAGE_ALIGN(endaddr),
274
                                   PAGE_RESERVED); 
275
                }
276
            } while (!feof(f));
277
            fclose(f);
278
        }
279
        mmap_unlock();
280
    }
281
#endif
282
}
283

    
284
static inline PageDesc **page_l1_map(target_ulong index)
285
{
286
#if TARGET_LONG_BITS > 32
287
    /* Host memory outside guest VM.  For 32-bit targets we have already
288
       excluded high addresses.  */
289
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290
        return NULL;
291
#endif
292
    return &l1_map[index >> L2_BITS];
293
}
294

    
295
static inline PageDesc *page_find_alloc(target_ulong index)
296
{
297
    PageDesc **lp, *p;
298
    lp = page_l1_map(index);
299
    if (!lp)
300
        return NULL;
301

    
302
    p = *lp;
303
    if (!p) {
304
        /* allocate if not found */
305
#if defined(CONFIG_USER_ONLY)
306
        unsigned long addr;
307
        size_t len = sizeof(PageDesc) * L2_SIZE;
308
        /* Don't use qemu_malloc because it may recurse.  */
309
        p = mmap(0, len, PROT_READ | PROT_WRITE,
310
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
311
        *lp = p;
312
        addr = h2g(p);
313
        if (addr == (target_ulong)addr) {
314
            page_set_flags(addr & TARGET_PAGE_MASK,
315
                           TARGET_PAGE_ALIGN(addr + len),
316
                           PAGE_RESERVED); 
317
        }
318
#else
319
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
320
        *lp = p;
321
#endif
322
    }
323
    return p + (index & (L2_SIZE - 1));
324
}
325

    
326
static inline PageDesc *page_find(target_ulong index)
327
{
328
    PageDesc **lp, *p;
329
    lp = page_l1_map(index);
330
    if (!lp)
331
        return NULL;
332

    
333
    p = *lp;
334
    if (!p)
335
        return 0;
336
    return p + (index & (L2_SIZE - 1));
337
}
338

    
339
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
340
{
341
    void **lp, **p;
342
    PhysPageDesc *pd;
343

    
344
    p = (void **)l1_phys_map;
345
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
346

    
347
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
348
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
349
#endif
350
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
351
    p = *lp;
352
    if (!p) {
353
        /* allocate if not found */
354
        if (!alloc)
355
            return NULL;
356
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
357
        memset(p, 0, sizeof(void *) * L1_SIZE);
358
        *lp = p;
359
    }
360
#endif
361
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
362
    pd = *lp;
363
    if (!pd) {
364
        int i;
365
        /* allocate if not found */
366
        if (!alloc)
367
            return NULL;
368
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
369
        *lp = pd;
370
        for (i = 0; i < L2_SIZE; i++)
371
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
372
    }
373
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
374
}
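/* Note (added for clarity): the physical page table above has two levels
   for physical address spaces of up to 32 bits and grows a third, topmost
   level when TARGET_PHYS_ADDR_SPACE_BITS > 32.  The #error above guards the
   limit: only one extra level of L1_BITS is provided, so at most
   32 + L1_BITS physical address bits can be represented.  With index being
   the physical page number, the per-level slots are
   (index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1),
   (index >> L2_BITS) & (L1_SIZE - 1) and index & (L2_SIZE - 1). */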
375

    
376
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
377
{
378
    return phys_page_find_alloc(index, 0);
379
}
380

    
381
#if !defined(CONFIG_USER_ONLY)
382
static void tlb_protect_code(ram_addr_t ram_addr);
383
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
384
                                    target_ulong vaddr);
385
#define mmap_lock() do { } while(0)
386
#define mmap_unlock() do { } while(0)
387
#endif
388

    
389
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
390

    
391
#if defined(CONFIG_USER_ONLY)
392
/* Currently it is not recommended to allocate big chunks of data in
393
   user mode. This will change once a dedicated libc is used. */
394
#define USE_STATIC_CODE_GEN_BUFFER
395
#endif
396

    
397
#ifdef USE_STATIC_CODE_GEN_BUFFER
398
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
399
#endif
400

    
401
static void code_gen_alloc(unsigned long tb_size)
402
{
403
#ifdef USE_STATIC_CODE_GEN_BUFFER
404
    code_gen_buffer = static_code_gen_buffer;
405
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
406
    map_exec(code_gen_buffer, code_gen_buffer_size);
407
#else
408
    code_gen_buffer_size = tb_size;
409
    if (code_gen_buffer_size == 0) {
410
#if defined(CONFIG_USER_ONLY)
411
        /* in user mode, phys_ram_size is not meaningful */
412
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
413
#else
414
        /* XXX: needs adjustments */
415
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
416
#endif
417
    }
418
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
419
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
420
    /* The code gen buffer location may have constraints depending on
421
       the host cpu and OS */
422
#if defined(__linux__) 
423
    {
424
        int flags;
425
        void *start = NULL;
426

    
427
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
428
#if defined(__x86_64__)
429
        flags |= MAP_32BIT;
430
        /* Cannot map more than that */
431
        if (code_gen_buffer_size > (800 * 1024 * 1024))
432
            code_gen_buffer_size = (800 * 1024 * 1024);
433
#elif defined(__sparc_v9__)
434
        // Map the buffer below 2G, so we can use direct calls and branches
435
        flags |= MAP_FIXED;
436
        start = (void *) 0x60000000UL;
437
        if (code_gen_buffer_size > (512 * 1024 * 1024))
438
            code_gen_buffer_size = (512 * 1024 * 1024);
439
#endif
440
        code_gen_buffer = mmap(start, code_gen_buffer_size,
441
                               PROT_WRITE | PROT_READ | PROT_EXEC,
442
                               flags, -1, 0);
443
        if (code_gen_buffer == MAP_FAILED) {
444
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
445
            exit(1);
446
        }
447
    }
448
#elif defined(__FreeBSD__)
449
    {
450
        int flags;
451
        void *addr = NULL;
452
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
453
#if defined(__x86_64__)
454
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
455
         * 0x40000000 is free */
456
        flags |= MAP_FIXED;
457
        addr = (void *)0x40000000;
458
        /* Cannot map more than that */
459
        if (code_gen_buffer_size > (800 * 1024 * 1024))
460
            code_gen_buffer_size = (800 * 1024 * 1024);
461
#endif
462
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
463
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
464
                               flags, -1, 0);
465
        if (code_gen_buffer == MAP_FAILED) {
466
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467
            exit(1);
468
        }
469
    }
470
#else
471
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
472
    if (!code_gen_buffer) {
473
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474
        exit(1);
475
    }
476
    map_exec(code_gen_buffer, code_gen_buffer_size);
477
#endif
478
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
479
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
480
    code_gen_buffer_max_size = code_gen_buffer_size - 
481
        code_gen_max_block_size();
482
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
483
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
484
}
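/* Note (added for clarity): the two derived limits above are sized so that
   translation can always finish inside the buffer.  code_gen_buffer_max_size
   leaves room for one worst-case block (code_gen_max_block_size()) past the
   flush threshold, and code_gen_max_blocks sizes the tbs[] array from the
   average block size.  As a rough illustration only: with the 32 MB default
   buffer and an average block size of 128 bytes (hypothetical value), about
   256K TranslationBlock descriptors would be allocated. */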
485

    
486
/* Must be called before using the QEMU cpus. 'tb_size' is the size
487
   (in bytes) allocated to the translation buffer. Zero means default
488
   size. */
489
void cpu_exec_init_all(unsigned long tb_size)
490
{
491
    cpu_gen_init();
492
    code_gen_alloc(tb_size);
493
    code_gen_ptr = code_gen_buffer;
494
    page_init();
495
#if !defined(CONFIG_USER_ONLY)
496
    io_mem_init();
497
#endif
498
}
499

    
500
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
501

    
502
#define CPU_COMMON_SAVE_VERSION 1
503

    
504
static void cpu_common_save(QEMUFile *f, void *opaque)
505
{
506
    CPUState *env = opaque;
507

    
508
    qemu_put_be32s(f, &env->halted);
509
    qemu_put_be32s(f, &env->interrupt_request);
510
}
511

    
512
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
513
{
514
    CPUState *env = opaque;
515

    
516
    if (version_id != CPU_COMMON_SAVE_VERSION)
517
        return -EINVAL;
518

    
519
    qemu_get_be32s(f, &env->halted);
520
    qemu_get_be32s(f, &env->interrupt_request);
521
    tlb_flush(env, 1);
522

    
523
    return 0;
524
}
525
#endif
526

    
527
void cpu_exec_init(CPUState *env)
528
{
529
    CPUState **penv;
530
    int cpu_index;
531

    
532
    env->next_cpu = NULL;
533
    penv = &first_cpu;
534
    cpu_index = 0;
535
    while (*penv != NULL) {
536
        penv = (CPUState **)&(*penv)->next_cpu;
537
        cpu_index++;
538
    }
539
    env->cpu_index = cpu_index;
540
    env->nb_watchpoints = 0;
541
    *penv = env;
542
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
543
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
544
                    cpu_common_save, cpu_common_load, env);
545
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
546
                    cpu_save, cpu_load, env);
547
#endif
548
}
549

    
550
static inline void invalidate_page_bitmap(PageDesc *p)
551
{
552
    if (p->code_bitmap) {
553
        qemu_free(p->code_bitmap);
554
        p->code_bitmap = NULL;
555
    }
556
    p->code_write_count = 0;
557
}
558

    
559
/* set to NULL all the 'first_tb' fields in all PageDescs */
560
static void page_flush_tb(void)
561
{
562
    int i, j;
563
    PageDesc *p;
564

    
565
    for(i = 0; i < L1_SIZE; i++) {
566
        p = l1_map[i];
567
        if (p) {
568
            for(j = 0; j < L2_SIZE; j++) {
569
                p->first_tb = NULL;
570
                invalidate_page_bitmap(p);
571
                p++;
572
            }
573
        }
574
    }
575
}
576

    
577
/* flush all the translation blocks */
578
/* XXX: tb_flush is currently not thread safe */
579
void tb_flush(CPUState *env1)
580
{
581
    CPUState *env;
582
#if defined(DEBUG_FLUSH)
583
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
584
           (unsigned long)(code_gen_ptr - code_gen_buffer),
585
           nb_tbs, nb_tbs > 0 ?
586
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
587
#endif
588
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
589
        cpu_abort(env1, "Internal error: code buffer overflow\n");
590

    
591
    nb_tbs = 0;
592

    
593
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
594
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
595
    }
596

    
597
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
598
    page_flush_tb();
599

    
600
    code_gen_ptr = code_gen_buffer;
601
    /* XXX: flush processor icache at this point if cache flush is
602
       expensive */
603
    tb_flush_count++;
604
}
605

    
606
#ifdef DEBUG_TB_CHECK
607

    
608
static void tb_invalidate_check(target_ulong address)
609
{
610
    TranslationBlock *tb;
611
    int i;
612
    address &= TARGET_PAGE_MASK;
613
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
614
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
615
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
616
                  address >= tb->pc + tb->size)) {
617
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
618
                       address, (long)tb->pc, tb->size);
619
            }
620
        }
621
    }
622
}
623

    
624
/* verify that all the pages have correct rights for code */
625
static void tb_page_check(void)
626
{
627
    TranslationBlock *tb;
628
    int i, flags1, flags2;
629

    
630
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
631
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
632
            flags1 = page_get_flags(tb->pc);
633
            flags2 = page_get_flags(tb->pc + tb->size - 1);
634
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
635
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
636
                       (long)tb->pc, tb->size, flags1, flags2);
637
            }
638
        }
639
    }
640
}
641

    
642
static void tb_jmp_check(TranslationBlock *tb)
643
{
644
    TranslationBlock *tb1;
645
    unsigned int n1;
646

    
647
    /* suppress any remaining jumps to this TB */
648
    tb1 = tb->jmp_first;
649
    for(;;) {
650
        n1 = (long)tb1 & 3;
651
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
652
        if (n1 == 2)
653
            break;
654
        tb1 = tb1->jmp_next[n1];
655
    }
656
    /* check end of list */
657
    if (tb1 != tb) {
658
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
659
    }
660
}
661

    
662
#endif
663

    
664
/* invalidate one TB */
665
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
666
                             int next_offset)
667
{
668
    TranslationBlock *tb1;
669
    for(;;) {
670
        tb1 = *ptb;
671
        if (tb1 == tb) {
672
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
673
            break;
674
        }
675
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
676
    }
677
}
678

    
679
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
680
{
681
    TranslationBlock *tb1;
682
    unsigned int n1;
683

    
684
    for(;;) {
685
        tb1 = *ptb;
686
        n1 = (long)tb1 & 3;
687
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
688
        if (tb1 == tb) {
689
            *ptb = tb1->page_next[n1];
690
            break;
691
        }
692
        ptb = &tb1->page_next[n1];
693
    }
694
}
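/* Note (added for clarity): the TB list walking above relies on pointer
   tagging.  Pointers stored in the page_next[] and jump lists carry an index
   in their low two bits: n = (long)tb & 3 recovers which slot of the
   pointed-to TB continues the list (0 or 1), the value 2 marks the head/end
   of the circular jump list (see tb->jmp_first = tb | 2 in tb_link_phys()),
   and masking with ~3 restores the real, aligned TranslationBlock pointer.
   This assumes TranslationBlock structures are at least 4-byte aligned,
   which their allocation guarantees. */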
695

    
696
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
697
{
698
    TranslationBlock *tb1, **ptb;
699
    unsigned int n1;
700

    
701
    ptb = &tb->jmp_next[n];
702
    tb1 = *ptb;
703
    if (tb1) {
704
        /* find tb(n) in circular list */
705
        for(;;) {
706
            tb1 = *ptb;
707
            n1 = (long)tb1 & 3;
708
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
709
            if (n1 == n && tb1 == tb)
710
                break;
711
            if (n1 == 2) {
712
                ptb = &tb1->jmp_first;
713
            } else {
714
                ptb = &tb1->jmp_next[n1];
715
            }
716
        }
717
        /* now we can suppress tb(n) from the list */
718
        *ptb = tb->jmp_next[n];
719

    
720
        tb->jmp_next[n] = NULL;
721
    }
722
}
723

    
724
/* reset the jump entry 'n' of a TB so that it is not chained to
725
   another TB */
726
static inline void tb_reset_jump(TranslationBlock *tb, int n)
727
{
728
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
729
}
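/* Note (added for clarity, an interpretation of the code above): resetting a
   jump makes slot n of the generated code branch to
   tb->tc_ptr + tb->tb_next_offset[n], i.e. back into this TB's own exit
   path, so execution returns to the main loop instead of chaining directly
   into another TB. */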
730

    
731
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
732
{
733
    CPUState *env;
734
    PageDesc *p;
735
    unsigned int h, n1;
736
    target_phys_addr_t phys_pc;
737
    TranslationBlock *tb1, *tb2;
738

    
739
    /* remove the TB from the hash list */
740
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
741
    h = tb_phys_hash_func(phys_pc);
742
    tb_remove(&tb_phys_hash[h], tb,
743
              offsetof(TranslationBlock, phys_hash_next));
744

    
745
    /* remove the TB from the page list */
746
    if (tb->page_addr[0] != page_addr) {
747
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
748
        tb_page_remove(&p->first_tb, tb);
749
        invalidate_page_bitmap(p);
750
    }
751
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
752
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
753
        tb_page_remove(&p->first_tb, tb);
754
        invalidate_page_bitmap(p);
755
    }
756

    
757
    tb_invalidated_flag = 1;
758

    
759
    /* remove the TB from the hash list */
760
    h = tb_jmp_cache_hash_func(tb->pc);
761
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
762
        if (env->tb_jmp_cache[h] == tb)
763
            env->tb_jmp_cache[h] = NULL;
764
    }
765

    
766
    /* suppress this TB from the two jump lists */
767
    tb_jmp_remove(tb, 0);
768
    tb_jmp_remove(tb, 1);
769

    
770
    /* suppress any remaining jumps to this TB */
771
    tb1 = tb->jmp_first;
772
    for(;;) {
773
        n1 = (long)tb1 & 3;
774
        if (n1 == 2)
775
            break;
776
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
777
        tb2 = tb1->jmp_next[n1];
778
        tb_reset_jump(tb1, n1);
779
        tb1->jmp_next[n1] = NULL;
780
        tb1 = tb2;
781
    }
782
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
783

    
784
    tb_phys_invalidate_count++;
785
}
786

    
787
static inline void set_bits(uint8_t *tab, int start, int len)
788
{
789
    int end, mask, end1;
790

    
791
    end = start + len;
792
    tab += start >> 3;
793
    mask = 0xff << (start & 7);
794
    if ((start & ~7) == (end & ~7)) {
795
        if (start < end) {
796
            mask &= ~(0xff << (end & 7));
797
            *tab |= mask;
798
        }
799
    } else {
800
        *tab++ |= mask;
801
        start = (start + 8) & ~7;
802
        end1 = end & ~7;
803
        while (start < end1) {
804
            *tab++ = 0xff;
805
            start += 8;
806
        }
807
        if (start < end) {
808
            mask = ~(0xff << (end & 7));
809
            *tab |= mask;
810
        }
811
    }
812
}
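/* Worked example (added for clarity): set_bits(tab, 3, 7) marks bits 3..9.
   The first partial byte gets mask 0xff << 3 (bits 3..7 of tab[0]), start is
   then rounded up to 8, no full bytes remain (end1 == 8), and the trailing
   partial byte gets mask ~(0xff << (10 & 7)) == 0x03 (bits 0..1 of tab[1],
   i.e. global bits 8..9). */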
813

    
814
static void build_page_bitmap(PageDesc *p)
815
{
816
    int n, tb_start, tb_end;
817
    TranslationBlock *tb;
818

    
819
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
820
    if (!p->code_bitmap)
821
        return;
822

    
823
    tb = p->first_tb;
824
    while (tb != NULL) {
825
        n = (long)tb & 3;
826
        tb = (TranslationBlock *)((long)tb & ~3);
827
        /* NOTE: this is subtle as a TB may span two physical pages */
828
        if (n == 0) {
829
            /* NOTE: tb_end may be after the end of the page, but
830
               it is not a problem */
831
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
832
            tb_end = tb_start + tb->size;
833
            if (tb_end > TARGET_PAGE_SIZE)
834
                tb_end = TARGET_PAGE_SIZE;
835
        } else {
836
            tb_start = 0;
837
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
838
        }
839
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
840
        tb = tb->page_next[n];
841
    }
842
}
843

    
844
TranslationBlock *tb_gen_code(CPUState *env,
845
                              target_ulong pc, target_ulong cs_base,
846
                              int flags, int cflags)
847
{
848
    TranslationBlock *tb;
849
    uint8_t *tc_ptr;
850
    target_ulong phys_pc, phys_page2, virt_page2;
851
    int code_gen_size;
852

    
853
    phys_pc = get_phys_addr_code(env, pc);
854
    tb = tb_alloc(pc);
855
    if (!tb) {
856
        /* flush must be done */
857
        tb_flush(env);
858
        /* cannot fail at this point */
859
        tb = tb_alloc(pc);
860
        /* Don't forget to invalidate previous TB info.  */
861
        tb_invalidated_flag = 1;
862
    }
863
    tc_ptr = code_gen_ptr;
864
    tb->tc_ptr = tc_ptr;
865
    tb->cs_base = cs_base;
866
    tb->flags = flags;
867
    tb->cflags = cflags;
868
    cpu_gen_code(env, tb, &code_gen_size);
869
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
870

    
871
    /* check next page if needed */
872
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
873
    phys_page2 = -1;
874
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
875
        phys_page2 = get_phys_addr_code(env, virt_page2);
876
    }
877
    tb_link_phys(tb, phys_pc, phys_page2);
878
    return tb;
879
}
880

    
881
/* invalidate all TBs which intersect with the target physical page
882
   starting in the range [start, end). NOTE: start and end must refer to
883
   the same physical page. 'is_cpu_write_access' should be true if called
884
   from a real cpu write access: the virtual CPU will exit the current
885
   TB if code is modified inside this TB. */
886
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
887
                                   int is_cpu_write_access)
888
{
889
    int n, current_tb_modified, current_tb_not_found, current_flags;
890
    CPUState *env = cpu_single_env;
891
    PageDesc *p;
892
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
893
    target_ulong tb_start, tb_end;
894
    target_ulong current_pc, current_cs_base;
895

    
896
    p = page_find(start >> TARGET_PAGE_BITS);
897
    if (!p)
898
        return;
899
    if (!p->code_bitmap &&
900
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
901
        is_cpu_write_access) {
902
        /* build code bitmap */
903
        build_page_bitmap(p);
904
    }
905

    
906
    /* we remove all the TBs in the range [start, end) */
907
    /* XXX: see if in some cases it could be faster to invalidate all the code */
908
    current_tb_not_found = is_cpu_write_access;
909
    current_tb_modified = 0;
910
    current_tb = NULL; /* avoid warning */
911
    current_pc = 0; /* avoid warning */
912
    current_cs_base = 0; /* avoid warning */
913
    current_flags = 0; /* avoid warning */
914
    tb = p->first_tb;
915
    while (tb != NULL) {
916
        n = (long)tb & 3;
917
        tb = (TranslationBlock *)((long)tb & ~3);
918
        tb_next = tb->page_next[n];
919
        /* NOTE: this is subtle as a TB may span two physical pages */
920
        if (n == 0) {
921
            /* NOTE: tb_end may be after the end of the page, but
922
               it is not a problem */
923
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
924
            tb_end = tb_start + tb->size;
925
        } else {
926
            tb_start = tb->page_addr[1];
927
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
928
        }
929
        if (!(tb_end <= start || tb_start >= end)) {
930
#ifdef TARGET_HAS_PRECISE_SMC
931
            if (current_tb_not_found) {
932
                current_tb_not_found = 0;
933
                current_tb = NULL;
934
                if (env->mem_io_pc) {
935
                    /* now we have a real cpu fault */
936
                    current_tb = tb_find_pc(env->mem_io_pc);
937
                }
938
            }
939
            if (current_tb == tb &&
940
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
941
                /* If we are modifying the current TB, we must stop
942
                its execution. We could be more precise by checking
943
                that the modification is after the current PC, but it
944
                would require a specialized function to partially
945
                restore the CPU state */
946

    
947
                current_tb_modified = 1;
948
                cpu_restore_state(current_tb, env,
949
                                  env->mem_io_pc, NULL);
950
#if defined(TARGET_I386)
951
                current_flags = env->hflags;
952
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
953
                current_cs_base = (target_ulong)env->segs[R_CS].base;
954
                current_pc = current_cs_base + env->eip;
955
#else
956
#error unsupported CPU
957
#endif
958
            }
959
#endif /* TARGET_HAS_PRECISE_SMC */
960
            /* we need to do this to handle the case where a signal
961
               occurs while doing tb_phys_invalidate() */
962
            saved_tb = NULL;
963
            if (env) {
964
                saved_tb = env->current_tb;
965
                env->current_tb = NULL;
966
            }
967
            tb_phys_invalidate(tb, -1);
968
            if (env) {
969
                env->current_tb = saved_tb;
970
                if (env->interrupt_request && env->current_tb)
971
                    cpu_interrupt(env, env->interrupt_request);
972
            }
973
        }
974
        tb = tb_next;
975
    }
976
#if !defined(CONFIG_USER_ONLY)
977
    /* if no code remaining, no need to continue to use slow writes */
978
    if (!p->first_tb) {
979
        invalidate_page_bitmap(p);
980
        if (is_cpu_write_access) {
981
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
982
        }
983
    }
984
#endif
985
#ifdef TARGET_HAS_PRECISE_SMC
986
    if (current_tb_modified) {
987
        /* we generate a block containing just the instruction
988
           modifying the memory. It will ensure that it cannot modify
989
           itself */
990
        env->current_tb = NULL;
991
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
992
        cpu_resume_from_signal(env, NULL);
993
    }
994
#endif
995
}
996

    
997
/* len must be <= 8 and start must be a multiple of len */
998
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
999
{
1000
    PageDesc *p;
1001
    int offset, b;
1002
#if 0
1003
    if (1) {
1004
        if (loglevel) {
1005
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1006
                   cpu_single_env->mem_io_vaddr, len,
1007
                   cpu_single_env->eip,
1008
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1009
        }
1010
    }
1011
#endif
1012
    p = page_find(start >> TARGET_PAGE_BITS);
1013
    if (!p)
1014
        return;
1015
    if (p->code_bitmap) {
1016
        offset = start & ~TARGET_PAGE_MASK;
1017
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1018
        if (b & ((1 << len) - 1))
1019
            goto do_invalidate;
1020
    } else {
1021
    do_invalidate:
1022
        tb_invalidate_phys_page_range(start, start + len, 1);
1023
    }
1024
}
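/* Note (added for clarity): the fast path above consults the page's code
   bitmap, which holds one bit per byte of the page.  For a write at page
   offset 0x123 of length 4, for instance, it reads bitmap byte
   0x123 >> 3 == 0x24, shifts it right by 0x123 & 7 == 3 and tests it against
   the mask (1 << 4) - 1, i.e. the bits covering offsets 0x123..0x126; only
   if one of them is set does the slow tb_invalidate_phys_page_range() path
   run. */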
1025

    
1026
#if !defined(CONFIG_SOFTMMU)
1027
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1028
                                    unsigned long pc, void *puc)
1029
{
1030
    int n, current_flags, current_tb_modified;
1031
    target_ulong current_pc, current_cs_base;
1032
    PageDesc *p;
1033
    TranslationBlock *tb, *current_tb;
1034
#ifdef TARGET_HAS_PRECISE_SMC
1035
    CPUState *env = cpu_single_env;
1036
#endif
1037

    
1038
    addr &= TARGET_PAGE_MASK;
1039
    p = page_find(addr >> TARGET_PAGE_BITS);
1040
    if (!p)
1041
        return;
1042
    tb = p->first_tb;
1043
    current_tb_modified = 0;
1044
    current_tb = NULL;
1045
    current_pc = 0; /* avoid warning */
1046
    current_cs_base = 0; /* avoid warning */
1047
    current_flags = 0; /* avoid warning */
1048
#ifdef TARGET_HAS_PRECISE_SMC
1049
    if (tb && pc != 0) {
1050
        current_tb = tb_find_pc(pc);
1051
    }
1052
#endif
1053
    while (tb != NULL) {
1054
        n = (long)tb & 3;
1055
        tb = (TranslationBlock *)((long)tb & ~3);
1056
#ifdef TARGET_HAS_PRECISE_SMC
1057
        if (current_tb == tb &&
1058
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1059
                /* If we are modifying the current TB, we must stop
1060
                   its execution. We could be more precise by checking
1061
                   that the modification is after the current PC, but it
1062
                   would require a specialized function to partially
1063
                   restore the CPU state */
1064

    
1065
            current_tb_modified = 1;
1066
            cpu_restore_state(current_tb, env, pc, puc);
1067
#if defined(TARGET_I386)
1068
            current_flags = env->hflags;
1069
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1070
            current_cs_base = (target_ulong)env->segs[R_CS].base;
1071
            current_pc = current_cs_base + env->eip;
1072
#else
1073
#error unsupported CPU
1074
#endif
1075
        }
1076
#endif /* TARGET_HAS_PRECISE_SMC */
1077
        tb_phys_invalidate(tb, addr);
1078
        tb = tb->page_next[n];
1079
    }
1080
    p->first_tb = NULL;
1081
#ifdef TARGET_HAS_PRECISE_SMC
1082
    if (current_tb_modified) {
1083
        /* we generate a block containing just the instruction
1084
           modifying the memory. It will ensure that it cannot modify
1085
           itself */
1086
        env->current_tb = NULL;
1087
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1088
        cpu_resume_from_signal(env, puc);
1089
    }
1090
#endif
1091
}
1092
#endif
1093

    
1094
/* add the tb in the target page and protect it if necessary */
1095
static inline void tb_alloc_page(TranslationBlock *tb,
1096
                                 unsigned int n, target_ulong page_addr)
1097
{
1098
    PageDesc *p;
1099
    TranslationBlock *last_first_tb;
1100

    
1101
    tb->page_addr[n] = page_addr;
1102
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1103
    tb->page_next[n] = p->first_tb;
1104
    last_first_tb = p->first_tb;
1105
    p->first_tb = (TranslationBlock *)((long)tb | n);
1106
    invalidate_page_bitmap(p);
1107

    
1108
#if defined(TARGET_HAS_SMC) || 1
1109

    
1110
#if defined(CONFIG_USER_ONLY)
1111
    if (p->flags & PAGE_WRITE) {
1112
        target_ulong addr;
1113
        PageDesc *p2;
1114
        int prot;
1115

    
1116
        /* force the host page to be non-writable (writes will incur a
1117
           page fault + mprotect overhead) */
1118
        page_addr &= qemu_host_page_mask;
1119
        prot = 0;
1120
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1121
            addr += TARGET_PAGE_SIZE) {
1122

    
1123
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1124
            if (!p2)
1125
                continue;
1126
            prot |= p2->flags;
1127
            p2->flags &= ~PAGE_WRITE;
1128
            page_get_flags(addr);
1129
          }
1130
        mprotect(g2h(page_addr), qemu_host_page_size,
1131
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1132
#ifdef DEBUG_TB_INVALIDATE
1133
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1134
               page_addr);
1135
#endif
1136
    }
1137
#else
1138
    /* if some code is already present, then the pages are already
1139
       protected. So we handle the case where only the first TB is
1140
       allocated in a physical page */
1141
    if (!last_first_tb) {
1142
        tlb_protect_code(page_addr);
1143
    }
1144
#endif
1145

    
1146
#endif /* TARGET_HAS_SMC */
1147
}
1148

    
1149
/* Allocate a new translation block. Flush the translation buffer if
1150
   too many translation blocks or too much generated code. */
1151
TranslationBlock *tb_alloc(target_ulong pc)
1152
{
1153
    TranslationBlock *tb;
1154

    
1155
    if (nb_tbs >= code_gen_max_blocks ||
1156
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1157
        return NULL;
1158
    tb = &tbs[nb_tbs++];
1159
    tb->pc = pc;
1160
    tb->cflags = 0;
1161
    return tb;
1162
}
1163

    
1164
void tb_free(TranslationBlock *tb)
1165
{
1166
    /* In practice this is mostly used for single-use temporary TBs.
1167
       Ignore the hard cases and just back up if this TB happens to
1168
       be the last one generated.  */
1169
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1170
        code_gen_ptr = tb->tc_ptr;
1171
        nb_tbs--;
1172
    }
1173
}
1174

    
1175
/* add a new TB and link it to the physical page tables. phys_page2 is
1176
   (-1) to indicate that only one page contains the TB. */
1177
void tb_link_phys(TranslationBlock *tb,
1178
                  target_ulong phys_pc, target_ulong phys_page2)
1179
{
1180
    unsigned int h;
1181
    TranslationBlock **ptb;
1182

    
1183
    /* Grab the mmap lock to stop another thread invalidating this TB
1184
       before we are done.  */
1185
    mmap_lock();
1186
    /* add in the physical hash table */
1187
    h = tb_phys_hash_func(phys_pc);
1188
    ptb = &tb_phys_hash[h];
1189
    tb->phys_hash_next = *ptb;
1190
    *ptb = tb;
1191

    
1192
    /* add in the page list */
1193
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1194
    if (phys_page2 != -1)
1195
        tb_alloc_page(tb, 1, phys_page2);
1196
    else
1197
        tb->page_addr[1] = -1;
1198

    
1199
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1200
    tb->jmp_next[0] = NULL;
1201
    tb->jmp_next[1] = NULL;
1202

    
1203
    /* init original jump addresses */
1204
    if (tb->tb_next_offset[0] != 0xffff)
1205
        tb_reset_jump(tb, 0);
1206
    if (tb->tb_next_offset[1] != 0xffff)
1207
        tb_reset_jump(tb, 1);
1208

    
1209
#ifdef DEBUG_TB_CHECK
1210
    tb_page_check();
1211
#endif
1212
    mmap_unlock();
1213
}
1214

    
1215
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1216
   tb[1].tc_ptr. Return NULL if not found */
1217
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1218
{
1219
    int m_min, m_max, m;
1220
    unsigned long v;
1221
    TranslationBlock *tb;
1222

    
1223
    if (nb_tbs <= 0)
1224
        return NULL;
1225
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1226
        tc_ptr >= (unsigned long)code_gen_ptr)
1227
        return NULL;
1228
    /* binary search (cf Knuth) */
1229
    m_min = 0;
1230
    m_max = nb_tbs - 1;
1231
    while (m_min <= m_max) {
1232
        m = (m_min + m_max) >> 1;
1233
        tb = &tbs[m];
1234
        v = (unsigned long)tb->tc_ptr;
1235
        if (v == tc_ptr)
1236
            return tb;
1237
        else if (tc_ptr < v) {
1238
            m_max = m - 1;
1239
        } else {
1240
            m_min = m + 1;
1241
        }
1242
    }
1243
    return &tbs[m_max];
1244
}
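/* Note (added for clarity): the binary search above works because TBs are
   carved sequentially out of code_gen_buffer, so tbs[] is sorted by tc_ptr.
   When the loop exits without an exact hit, m_max indexes the last TB whose
   tc_ptr is <= tc_ptr, i.e. the block that contains the given host PC.
   Typical use is mapping a host fault address back to its TB, e.g.
   tb_find_pc((unsigned long)fault_pc), where fault_pc stands for the host
   address reported by a signal handler. */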
1245

    
1246
static void tb_reset_jump_recursive(TranslationBlock *tb);
1247

    
1248
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1249
{
1250
    TranslationBlock *tb1, *tb_next, **ptb;
1251
    unsigned int n1;
1252

    
1253
    tb1 = tb->jmp_next[n];
1254
    if (tb1 != NULL) {
1255
        /* find head of list */
1256
        for(;;) {
1257
            n1 = (long)tb1 & 3;
1258
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1259
            if (n1 == 2)
1260
                break;
1261
            tb1 = tb1->jmp_next[n1];
1262
        }
1263
        /* we are now sure that tb jumps to tb1 */
1264
        tb_next = tb1;
1265

    
1266
        /* remove tb from the jmp_first list */
1267
        ptb = &tb_next->jmp_first;
1268
        for(;;) {
1269
            tb1 = *ptb;
1270
            n1 = (long)tb1 & 3;
1271
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1272
            if (n1 == n && tb1 == tb)
1273
                break;
1274
            ptb = &tb1->jmp_next[n1];
1275
        }
1276
        *ptb = tb->jmp_next[n];
1277
        tb->jmp_next[n] = NULL;
1278

    
1279
        /* suppress the jump to next tb in generated code */
1280
        tb_reset_jump(tb, n);
1281

    
1282
        /* suppress jumps in the tb on which we could have jumped */
1283
        tb_reset_jump_recursive(tb_next);
1284
    }
1285
}
1286

    
1287
static void tb_reset_jump_recursive(TranslationBlock *tb)
1288
{
1289
    tb_reset_jump_recursive2(tb, 0);
1290
    tb_reset_jump_recursive2(tb, 1);
1291
}
1292

    
1293
#if defined(TARGET_HAS_ICE)
1294
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1295
{
1296
    target_phys_addr_t addr;
1297
    target_ulong pd;
1298
    ram_addr_t ram_addr;
1299
    PhysPageDesc *p;
1300

    
1301
    addr = cpu_get_phys_page_debug(env, pc);
1302
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1303
    if (!p) {
1304
        pd = IO_MEM_UNASSIGNED;
1305
    } else {
1306
        pd = p->phys_offset;
1307
    }
1308
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1309
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1310
}
1311
#endif
1312

    
1313
/* Add a watchpoint.  */
1314
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1315
{
1316
    int i;
1317

    
1318
    for (i = 0; i < env->nb_watchpoints; i++) {
1319
        if (addr == env->watchpoint[i].vaddr)
1320
            return 0;
1321
    }
1322
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1323
        return -1;
1324

    
1325
    i = env->nb_watchpoints++;
1326
    env->watchpoint[i].vaddr = addr;
1327
    env->watchpoint[i].type = type;
1328
    tlb_flush_page(env, addr);
1329
    /* FIXME: This flush is needed because of the hack to make memory ops
1330
       terminate the TB.  It can be removed once the proper IO trap and
1331
       re-execute bits are in.  */
1332
    tb_flush(env);
1333
    return i;
1334
}
1335

    
1336
/* Remove a watchpoint.  */
1337
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1338
{
1339
    int i;
1340

    
1341
    for (i = 0; i < env->nb_watchpoints; i++) {
1342
        if (addr == env->watchpoint[i].vaddr) {
1343
            env->nb_watchpoints--;
1344
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1345
            tlb_flush_page(env, addr);
1346
            return 0;
1347
        }
1348
    }
1349
    return -1;
1350
}
1351

    
1352
/* Remove all watchpoints. */
1353
void cpu_watchpoint_remove_all(CPUState *env) {
1354
    int i;
1355

    
1356
    for (i = 0; i < env->nb_watchpoints; i++) {
1357
        tlb_flush_page(env, env->watchpoint[i].vaddr);
1358
    }
1359
    env->nb_watchpoints = 0;
1360
}
1361

    
1362
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1363
   breakpoint is reached */
1364
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1365
{
1366
#if defined(TARGET_HAS_ICE)
1367
    int i;
1368

    
1369
    for(i = 0; i < env->nb_breakpoints; i++) {
1370
        if (env->breakpoints[i] == pc)
1371
            return 0;
1372
    }
1373

    
1374
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1375
        return -1;
1376
    env->breakpoints[env->nb_breakpoints++] = pc;
1377

    
1378
    breakpoint_invalidate(env, pc);
1379
    return 0;
1380
#else
1381
    return -1;
1382
#endif
1383
}
1384

    
1385
/* remove all breakpoints */
1386
void cpu_breakpoint_remove_all(CPUState *env) {
1387
#if defined(TARGET_HAS_ICE)
1388
    int i;
1389
    for(i = 0; i < env->nb_breakpoints; i++) {
1390
        breakpoint_invalidate(env, env->breakpoints[i]);
1391
    }
1392
    env->nb_breakpoints = 0;
1393
#endif
1394
}
1395

    
1396
/* remove a breakpoint */
1397
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1398
{
1399
#if defined(TARGET_HAS_ICE)
1400
    int i;
1401
    for(i = 0; i < env->nb_breakpoints; i++) {
1402
        if (env->breakpoints[i] == pc)
1403
            goto found;
1404
    }
1405
    return -1;
1406
 found:
1407
    env->nb_breakpoints--;
1408
    if (i < env->nb_breakpoints)
1409
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1410

    
1411
    breakpoint_invalidate(env, pc);
1412
    return 0;
1413
#else
1414
    return -1;
1415
#endif
1416
}
1417

    
1418
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1419
   CPU loop after each instruction */
1420
void cpu_single_step(CPUState *env, int enabled)
1421
{
1422
#if defined(TARGET_HAS_ICE)
1423
    if (env->singlestep_enabled != enabled) {
1424
        env->singlestep_enabled = enabled;
1425
        /* must flush all the translated code to avoid inconsistencies */
1426
        /* XXX: only flush what is necessary */
1427
        tb_flush(env);
1428
    }
1429
#endif
1430
}
1431

    
1432
/* enable or disable low-level logging */
1433
void cpu_set_log(int log_flags)
1434
{
1435
    loglevel = log_flags;
1436
    if (loglevel && !logfile) {
1437
        logfile = fopen(logfilename, log_append ? "a" : "w");
1438
        if (!logfile) {
1439
            perror(logfilename);
1440
            _exit(1);
1441
        }
1442
#if !defined(CONFIG_SOFTMMU)
1443
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1444
        {
1445
            static char logfile_buf[4096];
1446
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1447
        }
1448
#else
1449
        setvbuf(logfile, NULL, _IOLBF, 0);
1450
#endif
1451
        log_append = 1;
1452
    }
1453
    if (!loglevel && logfile) {
1454
        fclose(logfile);
1455
        logfile = NULL;
1456
    }
1457
}
1458

    
1459
void cpu_set_log_filename(const char *filename)
1460
{
1461
    logfilename = strdup(filename);
1462
    if (logfile) {
1463
        fclose(logfile);
1464
        logfile = NULL;
1465
    }
1466
    cpu_set_log(loglevel);
1467
}
1468

    
1469
/* mask must never be zero, except for A20 change call */
1470
void cpu_interrupt(CPUState *env, int mask)
1471
{
1472
#if !defined(USE_NPTL)
1473
    TranslationBlock *tb;
1474
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1475
#endif
1476
    int old_mask;
1477

    
1478
    old_mask = env->interrupt_request;
1479
    /* FIXME: This is probably not threadsafe.  A different thread could
1480
       be in the middle of a read-modify-write operation.  */
1481
    env->interrupt_request |= mask;
1482
#if defined(USE_NPTL)
1483
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1484
       problem and hope the cpu will stop of its own accord.  For userspace
1485
       emulation this often isn't actually as bad as it sounds.  Often
1486
       signals are used primarily to interrupt blocking syscalls.  */
1487
#else
1488
    if (use_icount) {
1489
        env->icount_decr.u16.high = 0xffff;
1490
#ifndef CONFIG_USER_ONLY
1491
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1492
           an async event happened and we need to process it.  */
1493
        if (!can_do_io(env)
1494
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1495
            cpu_abort(env, "Raised interrupt while not in I/O function");
1496
        }
1497
#endif
1498
    } else {
1499
        tb = env->current_tb;
1500
        /* if the cpu is currently executing code, we must unlink it and
1501
           all the potentially executing TB */
1502
        if (tb && !testandset(&interrupt_lock)) {
1503
            env->current_tb = NULL;
1504
            tb_reset_jump_recursive(tb);
1505
            resetlock(&interrupt_lock);
1506
        }
1507
    }
1508
#endif
1509
}
1510

    
1511
void cpu_reset_interrupt(CPUState *env, int mask)
1512
{
1513
    env->interrupt_request &= ~mask;
1514
}
1515

    
1516
const CPULogItem cpu_log_items[] = {
1517
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1518
      "show generated host assembly code for each compiled TB" },
1519
    { CPU_LOG_TB_IN_ASM, "in_asm",
1520
      "show target assembly code for each compiled TB" },
1521
    { CPU_LOG_TB_OP, "op",
1522
      "show micro ops for each compiled TB" },
1523
    { CPU_LOG_TB_OP_OPT, "op_opt",
1524
      "show micro ops "
1525
#ifdef TARGET_I386
1526
      "before eflags optimization and "
1527
#endif
1528
      "after liveness analysis" },
1529
    { CPU_LOG_INT, "int",
1530
      "show interrupts/exceptions in short format" },
1531
    { CPU_LOG_EXEC, "exec",
1532
      "show trace before each executed TB (lots of logs)" },
1533
    { CPU_LOG_TB_CPU, "cpu",
1534
      "show CPU state before block translation" },
1535
#ifdef TARGET_I386
1536
    { CPU_LOG_PCALL, "pcall",
1537
      "show protected mode far calls/returns/exceptions" },
1538
#endif
1539
#ifdef DEBUG_IOPORT
1540
    { CPU_LOG_IOPORT, "ioport",
1541
      "show all i/o ports accesses" },
1542
#endif
1543
    { 0, NULL, NULL },
1544
};
1545

    
1546
static int cmp1(const char *s1, int n, const char *s2)
1547
{
1548
    if (strlen(s2) != n)
1549
        return 0;
1550
    return memcmp(s1, s2, n) == 0;
1551
}
1552

    
1553
/* takes a comma-separated list of log masks. Returns 0 on error. */
1554
int cpu_str_to_log_mask(const char *str)
1555
{
1556
    const CPULogItem *item;
1557
    int mask;
1558
    const char *p, *p1;
1559

    
1560
    p = str;
1561
    mask = 0;
1562
    for(;;) {
1563
        p1 = strchr(p, ',');
1564
        if (!p1)
1565
            p1 = p + strlen(p);
1566
        if(cmp1(p,p1-p,"all")) {
1567
                for(item = cpu_log_items; item->mask != 0; item++) {
1568
                        mask |= item->mask;
1569
                }
1570
        } else {
1571
        for(item = cpu_log_items; item->mask != 0; item++) {
1572
            if (cmp1(p, p1 - p, item->name))
1573
                goto found;
1574
        }
1575
        return 0;
1576
        }
1577
    found:
1578
        mask |= item->mask;
1579
        if (*p1 != ',')
1580
            break;
1581
        p = p1 + 1;
1582
    }
1583
    return mask;
1584
}
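/* Usage sketch (added for clarity, not in the original source): the parser
   above accepts a comma-separated list of the names in cpu_log_items, with
   "all" selecting every mask and any unknown name making it return 0:

       int mask = cpu_str_to_log_mask("in_asm,cpu");
       if (mask)
           cpu_set_log(mask);
*/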
1585

    
1586
void cpu_abort(CPUState *env, const char *fmt, ...)
1587
{
1588
    va_list ap;
1589
    va_list ap2;
1590

    
1591
    va_start(ap, fmt);
1592
    va_copy(ap2, ap);
1593
    fprintf(stderr, "qemu: fatal: ");
1594
    vfprintf(stderr, fmt, ap);
1595
    fprintf(stderr, "\n");
1596
#ifdef TARGET_I386
1597
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1598
#else
1599
    cpu_dump_state(env, stderr, fprintf, 0);
1600
#endif
1601
    if (logfile) {
1602
        fprintf(logfile, "qemu: fatal: ");
1603
        vfprintf(logfile, fmt, ap2);
1604
        fprintf(logfile, "\n");
1605
#ifdef TARGET_I386
1606
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1607
#else
1608
        cpu_dump_state(env, logfile, fprintf, 0);
1609
#endif
1610
        fflush(logfile);
1611
        fclose(logfile);
1612
    }
1613
    va_end(ap2);
1614
    va_end(ap);
1615
    abort();
1616
}
1617

    
1618
CPUState *cpu_copy(CPUState *env)
1619
{
1620
    CPUState *new_env = cpu_init(env->cpu_model_str);
1621
    /* preserve chaining and index */
1622
    CPUState *next_cpu = new_env->next_cpu;
1623
    int cpu_index = new_env->cpu_index;
1624
    memcpy(new_env, env, sizeof(CPUState));
1625
    new_env->next_cpu = next_cpu;
1626
    new_env->cpu_index = cpu_index;
1627
    return new_env;
1628
}
1629

    
1630
#if !defined(CONFIG_USER_ONLY)
1631

    
1632
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1633
{
1634
    unsigned int i;
1635

    
1636
    /* Discard jump cache entries for any tb which might potentially
1637
       overlap the flushed page.  */
1638
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1639
    memset (&env->tb_jmp_cache[i], 0, 
1640
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1641

    
1642
    i = tb_jmp_cache_hash_page(addr);
1643
    memset (&env->tb_jmp_cache[i], 0, 
1644
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1645
}
1646

    
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

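/* Reader's note: the two helpers below implement the write-protection side
 * of self-modifying-code detection.  tlb_protect_code() clears
 * CODE_DIRTY_FLAG for a RAM page so that the next write to it is trapped by
 * the notdirty handlers (which invalidate any translated code first);
 * tlb_unprotect_code_phys() sets the flag back once such trapping is no
 * longer needed for the page. */
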
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

1763
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1764
                                     int dirty_flags)
1765
{
1766
    CPUState *env;
1767
    unsigned long length, start1;
1768
    int i, mask, len;
1769
    uint8_t *p;
1770

    
1771
    start &= TARGET_PAGE_MASK;
1772
    end = TARGET_PAGE_ALIGN(end);
1773

    
1774
    length = end - start;
1775
    if (length == 0)
1776
        return;
1777
    len = length >> TARGET_PAGE_BITS;
1778
#ifdef USE_KQEMU
1779
    /* XXX: should not depend on cpu context */
1780
    env = first_cpu;
1781
    if (env->kqemu_enabled) {
1782
        ram_addr_t addr;
1783
        addr = start;
1784
        for(i = 0; i < len; i++) {
1785
            kqemu_set_notdirty(env, addr);
1786
            addr += TARGET_PAGE_SIZE;
1787
        }
1788
    }
1789
#endif
1790
    mask = ~dirty_flags;
1791
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1792
    for(i = 0; i < len; i++)
1793
        p[i] &= mask;
1794

    
1795
    /* we modify the TLB cache so that the dirty bit will be set again
1796
       when accessing the range */
1797
    start1 = start + (unsigned long)phys_ram_base;
1798
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1799
        for(i = 0; i < CPU_TLB_SIZE; i++)
1800
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1801
        for(i = 0; i < CPU_TLB_SIZE; i++)
1802
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1803
#if (NB_MMU_MODES >= 3)
1804
        for(i = 0; i < CPU_TLB_SIZE; i++)
1805
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1806
#if (NB_MMU_MODES == 4)
1807
        for(i = 0; i < CPU_TLB_SIZE; i++)
1808
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1809
#endif
1810
#endif
1811
    }
1812
}
1813

    
1814
int cpu_physical_memory_set_dirty_tracking(int enable)
1815
{
1816
    in_migration = enable;
1817
    return 0;
1818
}
1819

    
1820
int cpu_physical_memory_get_dirty_tracking(void)
1821
{
1822
    return in_migration;
1823
}
1824

    
1825
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1826
{
1827
    ram_addr_t ram_addr;
1828

    
1829
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1830
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1831
            tlb_entry->addend - (unsigned long)phys_ram_base;
1832
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1833
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1834
        }
1835
    }
1836
}
1837

    
1838
/* update the TLB according to the current state of the dirty bits */
1839
void cpu_tlb_update_dirty(CPUState *env)
1840
{
1841
    int i;
1842
    for(i = 0; i < CPU_TLB_SIZE; i++)
1843
        tlb_update_dirty(&env->tlb_table[0][i]);
1844
    for(i = 0; i < CPU_TLB_SIZE; i++)
1845
        tlb_update_dirty(&env->tlb_table[1][i]);
1846
#if (NB_MMU_MODES >= 3)
1847
    for(i = 0; i < CPU_TLB_SIZE; i++)
1848
        tlb_update_dirty(&env->tlb_table[2][i]);
1849
#if (NB_MMU_MODES == 4)
1850
    for(i = 0; i < CPU_TLB_SIZE; i++)
1851
        tlb_update_dirty(&env->tlb_table[3][i]);
1852
#endif
1853
#endif
1854
}
1855

    
1856
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1857
{
1858
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1859
        tlb_entry->addr_write = vaddr;
1860
}
1861

    
1862
/* update the TLB corresponding to virtual page vaddr
1863
   so that it is no longer dirty */
1864
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1865
{
1866
    int i;
1867

    
1868
    vaddr &= TARGET_PAGE_MASK;
1869
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1870
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1871
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1872
#if (NB_MMU_MODES >= 3)
1873
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1874
#if (NB_MMU_MODES == 4)
1875
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1876
#endif
1877
#endif
1878
}
1879

    
1880
/* add a new TLB entry. At most one entry for a given virtual address
1881
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1882
   (can only happen in non SOFTMMU mode for I/O pages or pages
1883
   conflicting with the host address space). */
1884
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1885
                      target_phys_addr_t paddr, int prot,
1886
                      int mmu_idx, int is_softmmu)
1887
{
1888
    PhysPageDesc *p;
1889
    unsigned long pd;
1890
    unsigned int index;
1891
    target_ulong address;
1892
    target_ulong code_address;
1893
    target_phys_addr_t addend;
1894
    int ret;
1895
    CPUTLBEntry *te;
1896
    int i;
1897
    target_phys_addr_t iotlb;
1898

    
1899
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1900
    if (!p) {
1901
        pd = IO_MEM_UNASSIGNED;
1902
    } else {
1903
        pd = p->phys_offset;
1904
    }
1905
#if defined(DEBUG_TLB)
1906
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1907
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1908
#endif
1909

    
1910
    ret = 0;
1911
    address = vaddr;
1912
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1913
        /* IO memory case (romd handled later) */
1914
        address |= TLB_MMIO;
1915
    }
1916
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1917
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1918
        /* Normal RAM.  */
1919
        iotlb = pd & TARGET_PAGE_MASK;
1920
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1921
            iotlb |= IO_MEM_NOTDIRTY;
1922
        else
1923
            iotlb |= IO_MEM_ROM;
1924
    } else {
1925
        /* IO handlers are currently passed a physical address.
1926
           It would be nice to pass an offset from the base address
1927
           of that region.  This would avoid having to special case RAM,
1928
           and avoid full address decoding in every device.
1929
           We can't use the high bits of pd for this because
1930
           IO_MEM_ROMD uses these as a ram address.  */
1931
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1932
    }
1933

    
1934
    code_address = address;
1935
    /* Make accesses to pages with watchpoints go via the
1936
       watchpoint trap routines.  */
1937
    for (i = 0; i < env->nb_watchpoints; i++) {
1938
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
1939
            iotlb = io_mem_watch + paddr;
1940
            /* TODO: The memory case can be optimized by not trapping
1941
               reads of pages with a write breakpoint.  */
1942
            address |= TLB_MMIO;
1943
        }
1944
    }
1945

    
1946
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1947
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
1948
    te = &env->tlb_table[mmu_idx][index];
1949
    te->addend = addend - vaddr;
1950
    if (prot & PAGE_READ) {
1951
        te->addr_read = address;
1952
    } else {
1953
        te->addr_read = -1;
1954
    }
1955

    
1956
    if (prot & PAGE_EXEC) {
1957
        te->addr_code = code_address;
1958
    } else {
1959
        te->addr_code = -1;
1960
    }
1961
    if (prot & PAGE_WRITE) {
1962
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1963
            (pd & IO_MEM_ROMD)) {
1964
            /* Write access calls the I/O callback.  */
1965
            te->addr_write = address | TLB_MMIO;
1966
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1967
                   !cpu_physical_memory_is_dirty(pd)) {
1968
            te->addr_write = address | TLB_NOTDIRTY;
1969
        } else {
1970
            te->addr_write = address;
1971
        }
1972
    } else {
1973
        te->addr_write = -1;
1974
    }
1975
    return ret;
1976
}
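
/* Reader's note: for RAM-backed pages the 'addend' stored above is chosen so
 * that host_address = guest_vaddr + addend, which the generated fast path is
 * expected to rely on.  For I/O pages, env->iotlb holds the handler index
 * (plus flags, or the ram offset for ROM/notdirty pages) biased by -vaddr,
 * so the slow path can recover it by adding the faulting virtual address
 * back (see the softmmu helpers). */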
1977

    
1978
#else
1979

    
1980
void tlb_flush(CPUState *env, int flush_global)
1981
{
1982
}
1983

    
1984
void tlb_flush_page(CPUState *env, target_ulong addr)
1985
{
1986
}
1987

    
1988
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1989
                      target_phys_addr_t paddr, int prot,
1990
                      int mmu_idx, int is_softmmu)
1991
{
1992
    return 0;
1993
}
1994

    
1995
/* dump memory mappings */
1996
void page_dump(FILE *f)
1997
{
1998
    unsigned long start, end;
1999
    int i, j, prot, prot1;
2000
    PageDesc *p;
2001

    
2002
    fprintf(f, "%-8s %-8s %-8s %s\n",
2003
            "start", "end", "size", "prot");
2004
    start = -1;
2005
    end = -1;
2006
    prot = 0;
2007
    for(i = 0; i <= L1_SIZE; i++) {
2008
        if (i < L1_SIZE)
2009
            p = l1_map[i];
2010
        else
2011
            p = NULL;
2012
        for(j = 0;j < L2_SIZE; j++) {
2013
            if (!p)
2014
                prot1 = 0;
2015
            else
2016
                prot1 = p[j].flags;
2017
            if (prot1 != prot) {
2018
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2019
                if (start != -1) {
2020
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2021
                            start, end, end - start,
2022
                            prot & PAGE_READ ? 'r' : '-',
2023
                            prot & PAGE_WRITE ? 'w' : '-',
2024
                            prot & PAGE_EXEC ? 'x' : '-');
2025
                }
2026
                if (prot1 != 0)
2027
                    start = end;
2028
                else
2029
                    start = -1;
2030
                prot = prot1;
2031
            }
2032
            if (!p)
2033
                break;
2034
        }
2035
    }
2036
}
2037

    
2038
int page_get_flags(target_ulong address)
2039
{
2040
    PageDesc *p;
2041

    
2042
    p = page_find(address >> TARGET_PAGE_BITS);
2043
    if (!p)
2044
        return 0;
2045
    return p->flags;
2046
}
2047

    
2048
/* modify the flags of a page and invalidate the code if
2049
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2050
   depending on PAGE_WRITE */
2051
void page_set_flags(target_ulong start, target_ulong end, int flags)
2052
{
2053
    PageDesc *p;
2054
    target_ulong addr;
2055

    
2056
    /* mmap_lock should already be held.  */
2057
    start = start & TARGET_PAGE_MASK;
2058
    end = TARGET_PAGE_ALIGN(end);
2059
    if (flags & PAGE_WRITE)
2060
        flags |= PAGE_WRITE_ORG;
2061
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2062
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2063
        /* We may be called for host regions that are outside guest
2064
           address space.  */
2065
        if (!p)
2066
            return;
2067
        /* if the write protection is set, then we invalidate the code
2068
           inside */
2069
        if (!(p->flags & PAGE_WRITE) &&
2070
            (flags & PAGE_WRITE) &&
2071
            p->first_tb) {
2072
            tb_invalidate_phys_page(addr, 0, NULL);
2073
        }
2074
        p->flags = flags;
2075
    }
2076
}
2077

    
2078
int page_check_range(target_ulong start, target_ulong len, int flags)
2079
{
2080
    PageDesc *p;
2081
    target_ulong end;
2082
    target_ulong addr;
2083

    
2084
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2085
    start = start & TARGET_PAGE_MASK;
2086

    
2087
    if( end < start )
2088
        /* we've wrapped around */
2089
        return -1;
2090
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2091
        p = page_find(addr >> TARGET_PAGE_BITS);
2092
        if( !p )
2093
            return -1;
2094
        if( !(p->flags & PAGE_VALID) )
2095
            return -1;
2096

    
2097
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2098
            return -1;
2099
        if (flags & PAGE_WRITE) {
2100
            if (!(p->flags & PAGE_WRITE_ORG))
2101
                return -1;
2102
            /* unprotect the page if it was put read-only because it
2103
               contains translated code */
2104
            if (!(p->flags & PAGE_WRITE)) {
2105
                if (!page_unprotect(addr, 0, NULL))
2106
                    return -1;
2107
            }
2108
            return 0;
2109
        }
2110
    }
2111
    return 0;
2112
}
2113

    
2114
/* called from signal handler: invalidate the code and unprotect the
2115
   page. Return TRUE if the fault was successfully handled. */
2116
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2117
{
2118
    unsigned int page_index, prot, pindex;
2119
    PageDesc *p, *p1;
2120
    target_ulong host_start, host_end, addr;
2121

    
2122
    /* Technically this isn't safe inside a signal handler.  However we
2123
       know this only ever happens in a synchronous SEGV handler, so in
2124
       practice it seems to be ok.  */
2125
    mmap_lock();
2126

    
2127
    host_start = address & qemu_host_page_mask;
2128
    page_index = host_start >> TARGET_PAGE_BITS;
2129
    p1 = page_find(page_index);
2130
    if (!p1) {
2131
        mmap_unlock();
2132
        return 0;
2133
    }
2134
    host_end = host_start + qemu_host_page_size;
2135
    p = p1;
2136
    prot = 0;
2137
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2138
        prot |= p->flags;
2139
        p++;
2140
    }
2141
    /* if the page was really writable, then we change its
2142
       protection back to writable */
2143
    if (prot & PAGE_WRITE_ORG) {
2144
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2145
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2146
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2147
                     (prot & PAGE_BITS) | PAGE_WRITE);
2148
            p1[pindex].flags |= PAGE_WRITE;
2149
            /* and since the content will be modified, we must invalidate
2150
               the corresponding translated code. */
2151
            tb_invalidate_phys_page(address, pc, puc);
2152
#ifdef DEBUG_TB_CHECK
2153
            tb_invalidate_check(address);
2154
#endif
2155
            mmap_unlock();
2156
            return 1;
2157
        }
2158
    }
2159
    mmap_unlock();
2160
    return 0;
2161
}
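
/* The flags of every target page inside the host page are ORed together
 * above because qemu_host_page_size may be larger than TARGET_PAGE_SIZE:
 * mprotect() can only operate at host-page granularity, so the whole host
 * page is made writable again before the affected translated code is
 * invalidated. */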
2162

    
2163
static inline void tlb_set_dirty(CPUState *env,
2164
                                 unsigned long addr, target_ulong vaddr)
2165
{
2166
}
2167
#endif /* defined(CONFIG_USER_ONLY) */
2168

    
2169
#if !defined(CONFIG_USER_ONLY)
2170
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2171
                             ram_addr_t memory);
2172
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2173
                           ram_addr_t orig_memory);
2174
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2175
                      need_subpage)                                     \
2176
    do {                                                                \
2177
        if (addr > start_addr)                                          \
2178
            start_addr2 = 0;                                            \
2179
        else {                                                          \
2180
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2181
            if (start_addr2 > 0)                                        \
2182
                need_subpage = 1;                                       \
2183
        }                                                               \
2184
                                                                        \
2185
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2186
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2187
        else {                                                          \
2188
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2189
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2190
                need_subpage = 1;                                       \
2191
        }                                                               \
2192
    } while (0)
2193

    
2194
/* register physical memory. 'size' must be a multiple of the target
2195
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2196
   io memory page */
2197
void cpu_register_physical_memory(target_phys_addr_t start_addr,
2198
                                  ram_addr_t size,
2199
                                  ram_addr_t phys_offset)
2200
{
2201
    target_phys_addr_t addr, end_addr;
2202
    PhysPageDesc *p;
2203
    CPUState *env;
2204
    ram_addr_t orig_size = size;
2205
    void *subpage;
2206

    
2207
#ifdef USE_KQEMU
2208
    /* XXX: should not depend on cpu context */
2209
    env = first_cpu;
2210
    if (env->kqemu_enabled) {
2211
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2212
    }
2213
#endif
2214
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2215
    end_addr = start_addr + (target_phys_addr_t)size;
2216
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2217
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2218
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2219
            ram_addr_t orig_memory = p->phys_offset;
2220
            target_phys_addr_t start_addr2, end_addr2;
2221
            int need_subpage = 0;
2222

    
2223
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2224
                          need_subpage);
2225
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2226
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2227
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2228
                                           &p->phys_offset, orig_memory);
2229
                } else {
2230
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2231
                                            >> IO_MEM_SHIFT];
2232
                }
2233
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2234
            } else {
2235
                p->phys_offset = phys_offset;
2236
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2237
                    (phys_offset & IO_MEM_ROMD))
2238
                    phys_offset += TARGET_PAGE_SIZE;
2239
            }
2240
        } else {
2241
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2242
            p->phys_offset = phys_offset;
2243
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2244
                (phys_offset & IO_MEM_ROMD))
2245
                phys_offset += TARGET_PAGE_SIZE;
2246
            else {
2247
                target_phys_addr_t start_addr2, end_addr2;
2248
                int need_subpage = 0;
2249

    
2250
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2251
                              end_addr2, need_subpage);
2252

    
2253
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2254
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2255
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2256
                    subpage_register(subpage, start_addr2, end_addr2,
2257
                                     phys_offset);
2258
                }
2259
            }
2260
        }
2261
    }
2262

    
2263
    /* since each CPU stores ram addresses in its TLB cache, we must
2264
       reset the modified entries */
2265
    /* XXX: slow ! */
2266
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2267
        tlb_flush(env, 1);
2268
    }
2269
}
2270

    
2271
/* XXX: temporary until new memory mapping API */
2272
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2273
{
2274
    PhysPageDesc *p;
2275

    
2276
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2277
    if (!p)
2278
        return IO_MEM_UNASSIGNED;
2279
    return p->phys_offset;
2280
}
2281

    
2282
/* XXX: better than nothing */
2283
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2284
{
2285
    ram_addr_t addr;
2286
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2287
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2288
                (uint64_t)size, (uint64_t)phys_ram_size);
2289
        abort();
2290
    }
2291
    addr = phys_ram_alloc_offset;
2292
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2293
    return addr;
2294
}
2295

    
2296
void qemu_ram_free(ram_addr_t addr)
2297
{
2298
}
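
/* Note: qemu_ram_alloc() above is a simple bump allocator over the single
 * preallocated guest RAM area (phys_ram_alloc_offset within phys_ram_size);
 * qemu_ram_free() is currently a no-op placeholder. */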
2299

    
2300
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2301
{
2302
#ifdef DEBUG_UNASSIGNED
2303
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2304
#endif
2305
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2306
    do_unassigned_access(addr, 0, 0, 0, 1);
2307
#endif
2308
    return 0;
2309
}
2310

    
2311
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2312
{
2313
#ifdef DEBUG_UNASSIGNED
2314
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2315
#endif
2316
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2317
    do_unassigned_access(addr, 0, 0, 0, 2);
2318
#endif
2319
    return 0;
2320
}
2321

    
2322
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2323
{
2324
#ifdef DEBUG_UNASSIGNED
2325
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2326
#endif
2327
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2328
    do_unassigned_access(addr, 0, 0, 0, 4);
2329
#endif
2330
    return 0;
2331
}
2332

    
2333
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2334
{
2335
#ifdef DEBUG_UNASSIGNED
2336
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2337
#endif
2338
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2339
    do_unassigned_access(addr, 1, 0, 0, 1);
2340
#endif
2341
}
2342

    
2343
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2344
{
2345
#ifdef DEBUG_UNASSIGNED
2346
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2347
#endif
2348
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2349
    do_unassigned_access(addr, 1, 0, 0, 2);
2350
#endif
2351
}
2352

    
2353
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2354
{
2355
#ifdef DEBUG_UNASSIGNED
2356
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2357
#endif
2358
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2359
    do_unassigned_access(addr, 1, 0, 0, 4);
2360
#endif
2361
}
2362

    
2363
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2364
    unassigned_mem_readb,
2365
    unassigned_mem_readw,
2366
    unassigned_mem_readl,
2367
};
2368

    
2369
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2370
    unassigned_mem_writeb,
2371
    unassigned_mem_writew,
2372
    unassigned_mem_writel,
2373
};
2374

    
2375
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2376
                                uint32_t val)
2377
{
2378
    int dirty_flags;
2379
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2380
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2381
#if !defined(CONFIG_USER_ONLY)
2382
        tb_invalidate_phys_page_fast(ram_addr, 1);
2383
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2384
#endif
2385
    }
2386
    stb_p(phys_ram_base + ram_addr, val);
2387
#ifdef USE_KQEMU
2388
    if (cpu_single_env->kqemu_enabled &&
2389
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2390
        kqemu_modify_page(cpu_single_env, ram_addr);
2391
#endif
2392
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2393
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2394
    /* we remove the notdirty callback only if the code has been
2395
       flushed */
2396
    if (dirty_flags == 0xff)
2397
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2398
}
2399

    
2400
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2401
                                uint32_t val)
2402
{
2403
    int dirty_flags;
2404
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2405
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2406
#if !defined(CONFIG_USER_ONLY)
2407
        tb_invalidate_phys_page_fast(ram_addr, 2);
2408
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2409
#endif
2410
    }
2411
    stw_p(phys_ram_base + ram_addr, val);
2412
#ifdef USE_KQEMU
2413
    if (cpu_single_env->kqemu_enabled &&
2414
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2415
        kqemu_modify_page(cpu_single_env, ram_addr);
2416
#endif
2417
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2418
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2419
    /* we remove the notdirty callback only if the code has been
2420
       flushed */
2421
    if (dirty_flags == 0xff)
2422
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2423
}
2424

    
2425
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2426
                                uint32_t val)
2427
{
2428
    int dirty_flags;
2429
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2430
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2431
#if !defined(CONFIG_USER_ONLY)
2432
        tb_invalidate_phys_page_fast(ram_addr, 4);
2433
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2434
#endif
2435
    }
2436
    stl_p(phys_ram_base + ram_addr, val);
2437
#ifdef USE_KQEMU
2438
    if (cpu_single_env->kqemu_enabled &&
2439
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2440
        kqemu_modify_page(cpu_single_env, ram_addr);
2441
#endif
2442
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2443
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2444
    /* we remove the notdirty callback only if the code has been
2445
       flushed */
2446
    if (dirty_flags == 0xff)
2447
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2448
}
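
/* Note on the three handlers above: phys_ram_dirty keeps one byte of dirty
 * flags per RAM page.  While CODE_DIRTY_FLAG is clear, writes to the page
 * are routed through these notdirty handlers so that translated code can be
 * invalidated before the store; once the flag byte is back to 0xff the
 * slow-path callback is removed again via tlb_set_dirty(). */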
2449

    
2450
static CPUReadMemoryFunc *error_mem_read[3] = {
2451
    NULL, /* never used */
2452
    NULL, /* never used */
2453
    NULL, /* never used */
2454
};
2455

    
2456
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2457
    notdirty_mem_writeb,
2458
    notdirty_mem_writew,
2459
    notdirty_mem_writel,
2460
};
2461

    
2462
/* Generate a debug exception if a watchpoint has been hit.  */
2463
static void check_watchpoint(int offset, int flags)
2464
{
2465
    CPUState *env = cpu_single_env;
2466
    target_ulong vaddr;
2467
    int i;
2468

    
2469
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2470
    for (i = 0; i < env->nb_watchpoints; i++) {
2471
        if (vaddr == env->watchpoint[i].vaddr
2472
                && (env->watchpoint[i].type & flags)) {
2473
            env->watchpoint_hit = i + 1;
2474
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2475
            break;
2476
        }
2477
    }
2478
}
2479

    
2480
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2481
   so these check for a hit then pass through to the normal out-of-line
2482
   phys routines.  */
2483
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2484
{
2485
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2486
    return ldub_phys(addr);
2487
}
2488

    
2489
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2490
{
2491
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2492
    return lduw_phys(addr);
2493
}
2494

    
2495
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2496
{
2497
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
2498
    return ldl_phys(addr);
2499
}
2500

    
2501
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2502
                             uint32_t val)
2503
{
2504
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2505
    stb_phys(addr, val);
2506
}
2507

    
2508
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2509
                             uint32_t val)
2510
{
2511
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2512
    stw_phys(addr, val);
2513
}
2514

    
2515
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2516
                             uint32_t val)
2517
{
2518
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
2519
    stl_phys(addr, val);
2520
}
2521

    
2522
static CPUReadMemoryFunc *watch_mem_read[3] = {
2523
    watch_mem_readb,
2524
    watch_mem_readw,
2525
    watch_mem_readl,
2526
};
2527

    
2528
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2529
    watch_mem_writeb,
2530
    watch_mem_writew,
2531
    watch_mem_writel,
2532
};
2533

    
2534
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2535
                                 unsigned int len)
2536
{
2537
    uint32_t ret;
2538
    unsigned int idx;
2539

    
2540
    idx = SUBPAGE_IDX(addr - mmio->base);
2541
#if defined(DEBUG_SUBPAGE)
2542
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2543
           mmio, len, addr, idx);
2544
#endif
2545
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2546

    
2547
    return ret;
2548
}
2549

    
2550
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2551
                              uint32_t value, unsigned int len)
2552
{
2553
    unsigned int idx;
2554

    
2555
    idx = SUBPAGE_IDX(addr - mmio->base);
2556
#if defined(DEBUG_SUBPAGE)
2557
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2558
           mmio, len, addr, idx, value);
2559
#endif
2560
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2561
}
2562

    
2563
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2564
{
2565
#if defined(DEBUG_SUBPAGE)
2566
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2567
#endif
2568

    
2569
    return subpage_readlen(opaque, addr, 0);
2570
}
2571

    
2572
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2573
                            uint32_t value)
2574
{
2575
#if defined(DEBUG_SUBPAGE)
2576
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2577
#endif
2578
    subpage_writelen(opaque, addr, value, 0);
2579
}
2580

    
2581
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2582
{
2583
#if defined(DEBUG_SUBPAGE)
2584
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2585
#endif
2586

    
2587
    return subpage_readlen(opaque, addr, 1);
2588
}
2589

    
2590
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2591
                            uint32_t value)
2592
{
2593
#if defined(DEBUG_SUBPAGE)
2594
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2595
#endif
2596
    subpage_writelen(opaque, addr, value, 1);
2597
}
2598

    
2599
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2600
{
2601
#if defined(DEBUG_SUBPAGE)
2602
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2603
#endif
2604

    
2605
    return subpage_readlen(opaque, addr, 2);
2606
}
2607

    
2608
static void subpage_writel (void *opaque,
2609
                         target_phys_addr_t addr, uint32_t value)
2610
{
2611
#if defined(DEBUG_SUBPAGE)
2612
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2613
#endif
2614
    subpage_writelen(opaque, addr, value, 2);
2615
}
2616

    
2617
static CPUReadMemoryFunc *subpage_read[] = {
2618
    &subpage_readb,
2619
    &subpage_readw,
2620
    &subpage_readl,
2621
};
2622

    
2623
static CPUWriteMemoryFunc *subpage_write[] = {
2624
    &subpage_writeb,
2625
    &subpage_writew,
2626
    &subpage_writel,
2627
};
2628

    
2629
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2630
                             ram_addr_t memory)
2631
{
2632
    int idx, eidx;
2633
    unsigned int i;
2634

    
2635
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2636
        return -1;
2637
    idx = SUBPAGE_IDX(start);
2638
    eidx = SUBPAGE_IDX(end);
2639
#if defined(DEBUG_SUBPAGE)
2640
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2641
           mmio, start, end, idx, eidx, memory);
2642
#endif
2643
    memory >>= IO_MEM_SHIFT;
2644
    for (; idx <= eidx; idx++) {
2645
        for (i = 0; i < 4; i++) {
2646
            if (io_mem_read[memory][i]) {
2647
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2648
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2649
            }
2650
            if (io_mem_write[memory][i]) {
2651
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2652
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2653
            }
2654
        }
2655
    }
2656

    
2657
    return 0;
2658
}
2659

    
2660
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2661
                           ram_addr_t orig_memory)
2662
{
2663
    subpage_t *mmio;
2664
    int subpage_memory;
2665

    
2666
    mmio = qemu_mallocz(sizeof(subpage_t));
2667
    if (mmio != NULL) {
2668
        mmio->base = base;
2669
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2670
#if defined(DEBUG_SUBPAGE)
2671
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2672
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2673
#endif
2674
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2675
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2676
    }
2677

    
2678
    return mmio;
2679
}
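
/* Summary of the subpage machinery: when a registered region does not cover
 * a whole target page (or its handlers are sub-width), the page's
 * phys_offset is redirected to a subpage_t (marked with IO_MEM_SUBPAGE).
 * subpage_register() fills the per-offset dispatch tables so that each
 * access is forwarded to the handlers of the region owning that part of the
 * page. */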
2680

    
2681
static void io_mem_init(void)
2682
{
2683
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2684
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2685
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2686
    io_mem_nb = 5;
2687

    
2688
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2689
                                          watch_mem_write, NULL);
2690
    /* alloc dirty bits array */
2691
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2692
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2693
}
2694

    
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

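/* Illustrative registration sequence (mydev_*, base_addr and s are
 * hypothetical names, not part of this file): a device registers its
 * byte/word/long handlers and then maps the returned token over a physical
 * range.
 *
 *     static CPUReadMemoryFunc *mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc *mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
 *     cpu_register_physical_memory(base_addr, 0x1000, io);
 */
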
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2730
{
2731
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2732
}
2733

    
2734
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2735
{
2736
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2737
}
2738

    
2739
#endif /* !defined(CONFIG_USER_ONLY) */
2740

    
2741
/* physical memory access (slow version, mainly for debug) */
2742
#if defined(CONFIG_USER_ONLY)
2743
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2744
                            int len, int is_write)
2745
{
2746
    int l, flags;
2747
    target_ulong page;
2748
    void * p;
2749

    
2750
    while (len > 0) {
2751
        page = addr & TARGET_PAGE_MASK;
2752
        l = (page + TARGET_PAGE_SIZE) - addr;
2753
        if (l > len)
2754
            l = len;
2755
        flags = page_get_flags(page);
2756
        if (!(flags & PAGE_VALID))
2757
            return;
2758
        if (is_write) {
2759
            if (!(flags & PAGE_WRITE))
2760
                return;
2761
            /* XXX: this code should not depend on lock_user */
2762
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2763
                /* FIXME - should this return an error rather than just fail? */
2764
                return;
2765
            memcpy(p, buf, l);
2766
            unlock_user(p, addr, l);
2767
        } else {
2768
            if (!(flags & PAGE_READ))
2769
                return;
2770
            /* XXX: this code should not depend on lock_user */
2771
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2772
                /* FIXME - should this return an error rather than just fail? */
2773
                return;
2774
            memcpy(buf, p, l);
2775
            unlock_user(p, addr, 0);
2776
        }
2777
        len -= l;
2778
        buf += l;
2779
        addr += l;
2780
    }
2781
}
2782

    
2783
#else
2784
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2785
                            int len, int is_write)
2786
{
2787
    int l, io_index;
2788
    uint8_t *ptr;
2789
    uint32_t val;
2790
    target_phys_addr_t page;
2791
    unsigned long pd;
2792
    PhysPageDesc *p;
2793

    
2794
    while (len > 0) {
2795
        page = addr & TARGET_PAGE_MASK;
2796
        l = (page + TARGET_PAGE_SIZE) - addr;
2797
        if (l > len)
2798
            l = len;
2799
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2800
        if (!p) {
2801
            pd = IO_MEM_UNASSIGNED;
2802
        } else {
2803
            pd = p->phys_offset;
2804
        }
2805

    
2806
        if (is_write) {
2807
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2808
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2809
                /* XXX: could force cpu_single_env to NULL to avoid
2810
                   potential bugs */
2811
                if (l >= 4 && ((addr & 3) == 0)) {
2812
                    /* 32 bit write access */
2813
                    val = ldl_p(buf);
2814
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2815
                    l = 4;
2816
                } else if (l >= 2 && ((addr & 1) == 0)) {
2817
                    /* 16 bit write access */
2818
                    val = lduw_p(buf);
2819
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2820
                    l = 2;
2821
                } else {
2822
                    /* 8 bit write access */
2823
                    val = ldub_p(buf);
2824
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2825
                    l = 1;
2826
                }
2827
            } else {
2828
                unsigned long addr1;
2829
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2830
                /* RAM case */
2831
                ptr = phys_ram_base + addr1;
2832
                memcpy(ptr, buf, l);
2833
                if (!cpu_physical_memory_is_dirty(addr1)) {
2834
                    /* invalidate code */
2835
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2836
                    /* set dirty bit */
2837
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2838
                        (0xff & ~CODE_DIRTY_FLAG);
2839
                }
2840
            }
2841
        } else {
2842
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2843
                !(pd & IO_MEM_ROMD)) {
2844
                /* I/O case */
2845
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2846
                if (l >= 4 && ((addr & 3) == 0)) {
2847
                    /* 32 bit read access */
2848
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2849
                    stl_p(buf, val);
2850
                    l = 4;
2851
                } else if (l >= 2 && ((addr & 1) == 0)) {
2852
                    /* 16 bit read access */
2853
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2854
                    stw_p(buf, val);
2855
                    l = 2;
2856
                } else {
2857
                    /* 8 bit read access */
2858
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2859
                    stb_p(buf, val);
2860
                    l = 1;
2861
                }
2862
            } else {
2863
                /* RAM case */
2864
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2865
                    (addr & ~TARGET_PAGE_MASK);
2866
                memcpy(buf, ptr, l);
2867
            }
2868
        }
2869
        len -= l;
2870
        buf += l;
2871
        addr += l;
2872
    }
2873
}
2874

    
2875
/* used for ROM loading : can write in RAM and ROM */
2876
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2877
                                   const uint8_t *buf, int len)
2878
{
2879
    int l;
2880
    uint8_t *ptr;
2881
    target_phys_addr_t page;
2882
    unsigned long pd;
2883
    PhysPageDesc *p;
2884

    
2885
    while (len > 0) {
2886
        page = addr & TARGET_PAGE_MASK;
2887
        l = (page + TARGET_PAGE_SIZE) - addr;
2888
        if (l > len)
2889
            l = len;
2890
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2891
        if (!p) {
2892
            pd = IO_MEM_UNASSIGNED;
2893
        } else {
2894
            pd = p->phys_offset;
2895
        }
2896

    
2897
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2898
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2899
            !(pd & IO_MEM_ROMD)) {
2900
            /* do nothing */
2901
        } else {
2902
            unsigned long addr1;
2903
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2904
            /* ROM/RAM case */
2905
            ptr = phys_ram_base + addr1;
2906
            memcpy(ptr, buf, l);
2907
        }
2908
        len -= l;
2909
        buf += l;
2910
        addr += l;
2911
    }
2912
}
2913

    
2914

    
2915
/* warning: addr must be aligned */
2916
uint32_t ldl_phys(target_phys_addr_t addr)
2917
{
2918
    int io_index;
2919
    uint8_t *ptr;
2920
    uint32_t val;
2921
    unsigned long pd;
2922
    PhysPageDesc *p;
2923

    
2924
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2925
    if (!p) {
2926
        pd = IO_MEM_UNASSIGNED;
2927
    } else {
2928
        pd = p->phys_offset;
2929
    }
2930

    
2931
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2932
        !(pd & IO_MEM_ROMD)) {
2933
        /* I/O case */
2934
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2935
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2936
    } else {
2937
        /* RAM case */
2938
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2939
            (addr & ~TARGET_PAGE_MASK);
2940
        val = ldl_p(ptr);
2941
    }
2942
    return val;
2943
}
2944

    
2945
/* warning: addr must be aligned */
2946
uint64_t ldq_phys(target_phys_addr_t addr)
2947
{
2948
    int io_index;
2949
    uint8_t *ptr;
2950
    uint64_t val;
2951
    unsigned long pd;
2952
    PhysPageDesc *p;
2953

    
2954
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2955
    if (!p) {
2956
        pd = IO_MEM_UNASSIGNED;
2957
    } else {
2958
        pd = p->phys_offset;
2959
    }
2960

    
2961
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2962
        !(pd & IO_MEM_ROMD)) {
2963
        /* I/O case */
2964
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2965
#ifdef TARGET_WORDS_BIGENDIAN
2966
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2967
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2968
#else
2969
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2970
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2971
#endif
2972
    } else {
2973
        /* RAM case */
2974
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2975
            (addr & ~TARGET_PAGE_MASK);
2976
        val = ldq_p(ptr);
2977
    }
2978
    return val;
2979
}
2980

    
2981
/* XXX: optimize */
2982
uint32_t ldub_phys(target_phys_addr_t addr)
2983
{
2984
    uint8_t val;
2985
    cpu_physical_memory_read(addr, &val, 1);
2986
    return val;
2987
}
2988

    
2989
/* XXX: optimize */
2990
uint32_t lduw_phys(target_phys_addr_t addr)
2991
{
2992
    uint16_t val;
2993
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2994
    return tswap16(val);
2995
}
2996

    
2997
/* warning: addr must be aligned. The ram page is not masked as dirty
2998
   and the code inside is not invalidated. It is useful if the dirty
2999
   bits are used to track modified PTEs */
3000
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3001
{
3002
    int io_index;
3003
    uint8_t *ptr;
3004
    unsigned long pd;
3005
    PhysPageDesc *p;
3006

    
3007
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3008
    if (!p) {
3009
        pd = IO_MEM_UNASSIGNED;
3010
    } else {
3011
        pd = p->phys_offset;
3012
    }
3013

    
3014
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3015
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3016
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3017
    } else {
3018
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3019
        ptr = phys_ram_base + addr1;
3020
        stl_p(ptr, val);
3021

    
3022
        if (unlikely(in_migration)) {
3023
            if (!cpu_physical_memory_is_dirty(addr1)) {
3024
                /* invalidate code */
3025
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3026
                /* set dirty bit */
3027
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3028
                    (0xff & ~CODE_DIRTY_FLAG);
3029
            }
3030
        }
3031
    }
3032
}
3033

    
3034
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3035
{
3036
    int io_index;
3037
    uint8_t *ptr;
3038
    unsigned long pd;
3039
    PhysPageDesc *p;
3040

    
3041
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3042
    if (!p) {
3043
        pd = IO_MEM_UNASSIGNED;
3044
    } else {
3045
        pd = p->phys_offset;
3046
    }
3047

    
3048
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3049
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3050
#ifdef TARGET_WORDS_BIGENDIAN
3051
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3052
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3053
#else
3054
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3055
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3056
#endif
3057
    } else {
3058
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3059
            (addr & ~TARGET_PAGE_MASK);
3060
        stq_p(ptr, val);
3061
    }
3062
}
3063

    
3064
/* warning: addr must be aligned */
3065
void stl_phys(target_phys_addr_t addr, uint32_t val)
3066
{
3067
    int io_index;
3068
    uint8_t *ptr;
3069
    unsigned long pd;
3070
    PhysPageDesc *p;
3071

    
3072
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3073
    if (!p) {
3074
        pd = IO_MEM_UNASSIGNED;
3075
    } else {
3076
        pd = p->phys_offset;
3077
    }
3078

    
3079
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3080
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3081
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3082
    } else {
3083
        unsigned long addr1;
3084
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3085
        /* RAM case */
3086
        ptr = phys_ram_base + addr1;
3087
        stl_p(ptr, val);
3088
        if (!cpu_physical_memory_is_dirty(addr1)) {
3089
            /* invalidate code */
3090
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3091
            /* set dirty bit */
3092
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3093
                (0xff & ~CODE_DIRTY_FLAG);
3094
        }
3095
    }
3096
}
3097

    
3098
/* XXX: optimize */
3099
void stb_phys(target_phys_addr_t addr, uint32_t val)
3100
{
3101
    uint8_t v = val;
3102
    cpu_physical_memory_write(addr, &v, 1);
3103
}
3104

    
3105
/* XXX: optimize */
3106
void stw_phys(target_phys_addr_t addr, uint32_t val)
3107
{
3108
    uint16_t v = tswap16(val);
3109
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3110
}
3111

    
3112
/* XXX: optimize */
3113
void stq_phys(target_phys_addr_t addr, uint64_t val)
3114
{
3115
    val = tswap64(val);
3116
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3117
}
3118

    
3119
#endif
3120

    
3121
/* virtual memory access for debug */
3122
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3123
                        uint8_t *buf, int len, int is_write)
3124
{
3125
    int l;
3126
    target_phys_addr_t phys_addr;
3127
    target_ulong page;
3128

    
3129
    while (len > 0) {
3130
        page = addr & TARGET_PAGE_MASK;
3131
        phys_addr = cpu_get_phys_page_debug(env, page);
3132
        /* if no physical page mapped, return an error */
3133
        if (phys_addr == -1)
3134
            return -1;
3135
        l = (page + TARGET_PAGE_SIZE) - addr;
3136
        if (l > len)
3137
            l = len;
3138
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3139
                               buf, l, is_write);
3140
        len -= l;
3141
        buf += l;
3142
        addr += l;
3143
    }
3144
    return 0;
3145
}
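
/* cpu_memory_rw_debug() is the debugger-facing access path (used, for
 * example, by the gdb stub): it translates one page at a time with
 * cpu_get_phys_page_debug() and then goes through cpu_physical_memory_rw()
 * on the resulting physical address. */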
3146

    
3147
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

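/* Prints translation buffer statistics (generated code size, TB counts
   and sizes, cross-page and direct-jump ratios, flush and invalidate
   counts) through the supplied cpu_fprintf callback, then calls
   tcg_dump_info for the TCG statistics. */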
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

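/* Instantiate the softmmu access helpers used for code fetches: the
   _cmmu variants are expanded from softmmu_template.h once per access
   size (SHIFT 0..3 for 1, 2, 4 and 8 byte accesses), with env aliased
   to cpu_single_env while the templates are included. */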
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif