1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#include "hw/hw.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#endif
44

    
45
//#define DEBUG_TB_INVALIDATE
46
//#define DEBUG_FLUSH
47
//#define DEBUG_TLB
48
//#define DEBUG_UNASSIGNED
49

    
50
/* make various TB consistency checks */
51
//#define DEBUG_TB_CHECK
52
//#define DEBUG_TLB_CHECK
53

    
54
//#define DEBUG_IOPORT
55
//#define DEBUG_SUBPAGE
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* TB consistency checks only implemented for usermode emulation.  */
59
#undef DEBUG_TB_CHECK
60
#endif
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#define MMAP_AREA_START        0x00000000
65
#define MMAP_AREA_END          0xa8000000
66

    
67
#if defined(TARGET_SPARC64)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 41
69
#elif defined(TARGET_SPARC)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 36
71
#elif defined(TARGET_ALPHA)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#define TARGET_VIRT_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_PPC64)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 36
80
#else
81
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82
#define TARGET_PHYS_ADDR_SPACE_BITS 32
83
#endif
84

    
85
TranslationBlock *tbs;
86
int code_gen_max_blocks;
87
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88
int nb_tbs;
89
/* any access to the tbs or the page table must use this lock */
90
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91

    
92
#if defined(__arm__) || defined(__sparc_v9__)
93
/* The prologue must be reachable with a direct jump. ARM and Sparc64
94
 have limited branch ranges (possibly also PPC) so place it in a
95
 section close to the code segment. */
96
#define code_gen_section                                \
97
    __attribute__((__section__(".gen_code")))           \
98
    __attribute__((aligned (32)))
99
#else
100
#define code_gen_section                                \
101
    __attribute__((aligned (32)))
102
#endif
103

    
104
uint8_t code_gen_prologue[1024] code_gen_section;
105
uint8_t *code_gen_buffer;
106
unsigned long code_gen_buffer_size;
107
/* threshold to flush the translated code buffer */
108
unsigned long code_gen_buffer_max_size; 
109
uint8_t *code_gen_ptr;
110

    
111
#if !defined(CONFIG_USER_ONLY)
112
ram_addr_t phys_ram_size;
113
int phys_ram_fd;
114
uint8_t *phys_ram_base;
115
uint8_t *phys_ram_dirty;
116
static ram_addr_t phys_ram_alloc_offset = 0;
117
#endif
118

    
119
CPUState *first_cpu;
120
/* current CPU in the current thread. It is only valid inside
121
   cpu_exec() */
122
CPUState *cpu_single_env;
123
/* 0 = Do not count executed instructions.
124
   1 = Precise instruction counting.
125
   2 = Adaptive rate instruction counting.  */
126
int use_icount = 0;
127
/* Current instruction counter.  While executing translated code this may
128
   include some instructions that have not yet been executed.  */
129
int64_t qemu_icount;
130

    
131
typedef struct PageDesc {
132
    /* list of TBs intersecting this ram page */
133
    TranslationBlock *first_tb;
134
    /* in order to optimize self modifying code, we count the number
135
       of lookups we do to a given page to use a bitmap */
136
    unsigned int code_write_count;
137
    uint8_t *code_bitmap;
138
#if defined(CONFIG_USER_ONLY)
139
    unsigned long flags;
140
#endif
141
} PageDesc;
142

    
143
typedef struct PhysPageDesc {
144
    /* offset in host memory of the page + io_index in the low bits */
145
    ram_addr_t phys_offset;
146
} PhysPageDesc;
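/* Illustrative sketch (not part of the original file): how the combined
   phys_offset field above is usually decomposed.  The helper name is
   hypothetical; it only restates the "offset + io_index in the low bits"
   layout described in the comment. */
#if 0
static void example_decode_phys_offset(ram_addr_t phys_offset)
{
    /* the page-aligned RAM offset lives in the high bits... */
    ram_addr_t ram_offset = phys_offset & TARGET_PAGE_MASK;
    /* ...and the I/O handler index (IO_MEM_RAM, IO_MEM_UNASSIGNED, ...)
       occupies the low bits below TARGET_PAGE_BITS */
    ram_addr_t io_index = phys_offset & ~TARGET_PAGE_MASK;
    (void)ram_offset;
    (void)io_index;
}
#endif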
147

    
148
#define L2_BITS 10
149
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150
/* XXX: this is a temporary hack for alpha target.
151
 *      In the future, this is to be replaced by a multi-level table
152
 *      to actually be able to handle the complete 64 bits address space.
153
 */
154
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
155
#else
156
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
157
#endif
158

    
159
#define L1_SIZE (1 << L1_BITS)
160
#define L2_SIZE (1 << L2_BITS)
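/* Illustrative sketch (not part of the original file): how a target page
   index is split across the two-level l1_map[] table declared further down.
   The function name is hypothetical; page_find() below does the same lookup
   with the additional range and allocation handling. */
#if 0
static PageDesc *example_two_level_lookup(target_ulong page_index)
{
    /* the upper bits select one of the L1_SIZE chunks... */
    PageDesc *chunk = l1_map[page_index >> L2_BITS];
    /* ...and the low L2_BITS bits select the descriptor inside that chunk */
    return chunk ? chunk + (page_index & (L2_SIZE - 1)) : NULL;
}
#endif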
161

    
162
unsigned long qemu_real_host_page_size;
163
unsigned long qemu_host_page_bits;
164
unsigned long qemu_host_page_size;
165
unsigned long qemu_host_page_mask;
166

    
167
/* XXX: for system emulation, it could just be an array */
168
static PageDesc *l1_map[L1_SIZE];
169
PhysPageDesc **l1_phys_map;
170

    
171
#if !defined(CONFIG_USER_ONLY)
172
static void io_mem_init(void);
173

    
174
/* io memory support */
175
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
177
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
178
static int io_mem_nb;
179
static int io_mem_watch;
180
#endif
181

    
182
/* log support */
183
const char *logfilename = "/tmp/qemu.log";
184
FILE *logfile;
185
int loglevel;
186
static int log_append = 0;
187

    
188
/* statistics */
189
static int tlb_flush_count;
190
static int tb_flush_count;
191
static int tb_phys_invalidate_count;
192

    
193
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194
typedef struct subpage_t {
195
    target_phys_addr_t base;
196
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198
    void *opaque[TARGET_PAGE_SIZE][2][4];
199
} subpage_t;
200

    
201
#ifdef _WIN32
202
static void map_exec(void *addr, long size)
203
{
204
    DWORD old_protect;
205
    VirtualProtect(addr, size,
206
                   PAGE_EXECUTE_READWRITE, &old_protect);
207
    
208
}
209
#else
210
static void map_exec(void *addr, long size)
211
{
212
    unsigned long start, end, page_size;
213
    
214
    page_size = getpagesize();
215
    start = (unsigned long)addr;
216
    start &= ~(page_size - 1);
217
    
218
    end = (unsigned long)addr + size;
219
    end += page_size - 1;
220
    end &= ~(page_size - 1);
221
    
222
    mprotect((void *)start, end - start,
223
             PROT_READ | PROT_WRITE | PROT_EXEC);
224
}
225
#endif
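/* Worked example (not part of the original file), assuming 4 KiB host pages:
   map_exec(addr = 0x12345, size = 0x100) rounds the start down to 0x12000 and
   the end (0x12445) up to 0x13000, so the single containing page is remapped
   read/write/execute. */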
226

    
227
static void page_init(void)
228
{
229
    /* NOTE: we can always assume that qemu_host_page_size >=
230
       TARGET_PAGE_SIZE */
231
#ifdef _WIN32
232
    {
233
        SYSTEM_INFO system_info;
234
        DWORD old_protect;
235

    
236
        GetSystemInfo(&system_info);
237
        qemu_real_host_page_size = system_info.dwPageSize;
238
    }
239
#else
240
    qemu_real_host_page_size = getpagesize();
241
#endif
242
    if (qemu_host_page_size == 0)
243
        qemu_host_page_size = qemu_real_host_page_size;
244
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
245
        qemu_host_page_size = TARGET_PAGE_SIZE;
246
    qemu_host_page_bits = 0;
247
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248
        qemu_host_page_bits++;
249
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
250
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
252

    
253
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
254
    {
255
        long long startaddr, endaddr;
256
        FILE *f;
257
        int n;
258

    
259
        mmap_lock();
260
        last_brk = (unsigned long)sbrk(0);
261
        f = fopen("/proc/self/maps", "r");
262
        if (f) {
263
            do {
264
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
265
                if (n == 2) {
266
                    startaddr = MIN(startaddr,
267
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268
                    endaddr = MIN(endaddr,
269
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
271
                                   TARGET_PAGE_ALIGN(endaddr),
272
                                   PAGE_RESERVED); 
273
                }
274
            } while (!feof(f));
275
            fclose(f);
276
        }
277
        mmap_unlock();
278
    }
279
#endif
280
}
281

    
282
static inline PageDesc **page_l1_map(target_ulong index)
283
{
284
#if TARGET_LONG_BITS > 32
285
    /* Host memory outside guest VM.  For 32-bit targets we have already
286
       excluded high addresses.  */
287
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
288
        return NULL;
289
#endif
290
    return &l1_map[index >> L2_BITS];
291
}
292

    
293
static inline PageDesc *page_find_alloc(target_ulong index)
294
{
295
    PageDesc **lp, *p;
296
    lp = page_l1_map(index);
297
    if (!lp)
298
        return NULL;
299

    
300
    p = *lp;
301
    if (!p) {
302
        /* allocate if not found */
303
#if defined(CONFIG_USER_ONLY)
304
        unsigned long addr;
305
        size_t len = sizeof(PageDesc) * L2_SIZE;
306
        /* Don't use qemu_malloc because it may recurse.  */
307
        p = mmap(0, len, PROT_READ | PROT_WRITE,
308
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
309
        *lp = p;
310
        addr = h2g(p);
311
        if (addr == (target_ulong)addr) {
312
            page_set_flags(addr & TARGET_PAGE_MASK,
313
                           TARGET_PAGE_ALIGN(addr + len),
314
                           PAGE_RESERVED); 
315
        }
316
#else
317
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
318
        *lp = p;
319
#endif
320
    }
321
    return p + (index & (L2_SIZE - 1));
322
}
323

    
324
static inline PageDesc *page_find(target_ulong index)
325
{
326
    PageDesc **lp, *p;
327
    lp = page_l1_map(index);
328
    if (!lp)
329
        return NULL;
330

    
331
    p = *lp;
332
    if (!p)
333
        return 0;
334
    return p + (index & (L2_SIZE - 1));
335
}
336

    
337
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
338
{
339
    void **lp, **p;
340
    PhysPageDesc *pd;
341

    
342
    p = (void **)l1_phys_map;
343
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
344

    
345
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
346
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
347
#endif
348
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
349
    p = *lp;
350
    if (!p) {
351
        /* allocate if not found */
352
        if (!alloc)
353
            return NULL;
354
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
355
        memset(p, 0, sizeof(void *) * L1_SIZE);
356
        *lp = p;
357
    }
358
#endif
359
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
360
    pd = *lp;
361
    if (!pd) {
362
        int i;
363
        /* allocate if not found */
364
        if (!alloc)
365
            return NULL;
366
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
367
        *lp = pd;
368
        for (i = 0; i < L2_SIZE; i++)
369
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
370
    }
371
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
372
}
373

    
374
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
375
{
376
    return phys_page_find_alloc(index, 0);
377
}
378

    
379
#if !defined(CONFIG_USER_ONLY)
380
static void tlb_protect_code(ram_addr_t ram_addr);
381
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
382
                                    target_ulong vaddr);
383
#define mmap_lock() do { } while(0)
384
#define mmap_unlock() do { } while(0)
385
#endif
386

    
387
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
388

    
389
#if defined(CONFIG_USER_ONLY)
390
/* Currently it is not recommended to allocate big chunks of data in
391
   user mode. This will change when a dedicated libc is used. */
392
#define USE_STATIC_CODE_GEN_BUFFER
393
#endif
394

    
395
#ifdef USE_STATIC_CODE_GEN_BUFFER
396
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
397
#endif
398

    
399
static void code_gen_alloc(unsigned long tb_size)
400
{
401
#ifdef USE_STATIC_CODE_GEN_BUFFER
402
    code_gen_buffer = static_code_gen_buffer;
403
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
404
    map_exec(code_gen_buffer, code_gen_buffer_size);
405
#else
406
    code_gen_buffer_size = tb_size;
407
    if (code_gen_buffer_size == 0) {
408
#if defined(CONFIG_USER_ONLY)
409
        /* in user mode, phys_ram_size is not meaningful */
410
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
411
#else
412
        /* XXX: needs adjustments */
413
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
414
#endif
415
    }
416
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
417
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
418
    /* The code gen buffer location may have constraints depending on
419
       the host cpu and OS */
420
#if defined(__linux__) 
421
    {
422
        int flags;
423
        void *start = NULL;
424

    
425
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
426
#if defined(__x86_64__)
427
        flags |= MAP_32BIT;
428
        /* Cannot map more than that */
429
        if (code_gen_buffer_size > (800 * 1024 * 1024))
430
            code_gen_buffer_size = (800 * 1024 * 1024);
431
#elif defined(__sparc_v9__)
432
        // Map the buffer below 2G, so we can use direct calls and branches
433
        flags |= MAP_FIXED;
434
        start = (void *) 0x60000000UL;
435
        if (code_gen_buffer_size > (512 * 1024 * 1024))
436
            code_gen_buffer_size = (512 * 1024 * 1024);
437
#endif
438
        code_gen_buffer = mmap(start, code_gen_buffer_size,
439
                               PROT_WRITE | PROT_READ | PROT_EXEC,
440
                               flags, -1, 0);
441
        if (code_gen_buffer == MAP_FAILED) {
442
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
443
            exit(1);
444
        }
445
    }
446
#else
447
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
448
    if (!code_gen_buffer) {
449
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
450
        exit(1);
451
    }
452
    map_exec(code_gen_buffer, code_gen_buffer_size);
453
#endif
454
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
455
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
456
    code_gen_buffer_max_size = code_gen_buffer_size - 
457
        code_gen_max_block_size();
458
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
459
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
460
}
461

    
462
/* Must be called before using the QEMU cpus. 'tb_size' is the size
463
   (in bytes) allocated to the translation buffer. Zero means default
464
   size. */
465
void cpu_exec_init_all(unsigned long tb_size)
466
{
467
    cpu_gen_init();
468
    code_gen_alloc(tb_size);
469
    code_gen_ptr = code_gen_buffer;
470
    page_init();
471
#if !defined(CONFIG_USER_ONLY)
472
    io_mem_init();
473
#endif
474
}
475

    
476
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
477

    
478
#define CPU_COMMON_SAVE_VERSION 1
479

    
480
static void cpu_common_save(QEMUFile *f, void *opaque)
481
{
482
    CPUState *env = opaque;
483

    
484
    qemu_put_be32s(f, &env->halted);
485
    qemu_put_be32s(f, &env->interrupt_request);
486
}
487

    
488
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
489
{
490
    CPUState *env = opaque;
491

    
492
    if (version_id != CPU_COMMON_SAVE_VERSION)
493
        return -EINVAL;
494

    
495
    qemu_get_be32s(f, &env->halted);
496
    qemu_get_be32s(f, &env->interrupt_request);
497
    tlb_flush(env, 1);
498

    
499
    return 0;
500
}
501
#endif
502

    
503
void cpu_exec_init(CPUState *env)
504
{
505
    CPUState **penv;
506
    int cpu_index;
507

    
508
    env->next_cpu = NULL;
509
    penv = &first_cpu;
510
    cpu_index = 0;
511
    while (*penv != NULL) {
512
        penv = (CPUState **)&(*penv)->next_cpu;
513
        cpu_index++;
514
    }
515
    env->cpu_index = cpu_index;
516
    env->nb_watchpoints = 0;
517
    *penv = env;
518
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
519
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
520
                    cpu_common_save, cpu_common_load, env);
521
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
522
                    cpu_save, cpu_load, env);
523
#endif
524
}
525

    
526
static inline void invalidate_page_bitmap(PageDesc *p)
527
{
528
    if (p->code_bitmap) {
529
        qemu_free(p->code_bitmap);
530
        p->code_bitmap = NULL;
531
    }
532
    p->code_write_count = 0;
533
}
534

    
535
/* set to NULL all the 'first_tb' fields in all PageDescs */
536
static void page_flush_tb(void)
537
{
538
    int i, j;
539
    PageDesc *p;
540

    
541
    for(i = 0; i < L1_SIZE; i++) {
542
        p = l1_map[i];
543
        if (p) {
544
            for(j = 0; j < L2_SIZE; j++) {
545
                p->first_tb = NULL;
546
                invalidate_page_bitmap(p);
547
                p++;
548
            }
549
        }
550
    }
551
}
552

    
553
/* flush all the translation blocks */
554
/* XXX: tb_flush is currently not thread safe */
555
void tb_flush(CPUState *env1)
556
{
557
    CPUState *env;
558
#if defined(DEBUG_FLUSH)
559
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
560
           (unsigned long)(code_gen_ptr - code_gen_buffer),
561
           nb_tbs, nb_tbs > 0 ?
562
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
563
#endif
564
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
565
        cpu_abort(env1, "Internal error: code buffer overflow\n");
566

    
567
    nb_tbs = 0;
568

    
569
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
570
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
571
    }
572

    
573
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
574
    page_flush_tb();
575

    
576
    code_gen_ptr = code_gen_buffer;
577
    /* XXX: flush processor icache at this point if cache flush is
578
       expensive */
579
    tb_flush_count++;
580
}
581

    
582
#ifdef DEBUG_TB_CHECK
583

    
584
static void tb_invalidate_check(target_ulong address)
585
{
586
    TranslationBlock *tb;
587
    int i;
588
    address &= TARGET_PAGE_MASK;
589
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
590
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
591
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
592
                  address >= tb->pc + tb->size)) {
593
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
594
                       address, (long)tb->pc, tb->size);
595
            }
596
        }
597
    }
598
}
599

    
600
/* verify that all the pages have correct rights for code */
601
static void tb_page_check(void)
602
{
603
    TranslationBlock *tb;
604
    int i, flags1, flags2;
605

    
606
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
607
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
608
            flags1 = page_get_flags(tb->pc);
609
            flags2 = page_get_flags(tb->pc + tb->size - 1);
610
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
611
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
612
                       (long)tb->pc, tb->size, flags1, flags2);
613
            }
614
        }
615
    }
616
}
617

    
618
void tb_jmp_check(TranslationBlock *tb)
619
{
620
    TranslationBlock *tb1;
621
    unsigned int n1;
622

    
623
    /* suppress any remaining jumps to this TB */
624
    tb1 = tb->jmp_first;
625
    for(;;) {
626
        n1 = (long)tb1 & 3;
627
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
628
        if (n1 == 2)
629
            break;
630
        tb1 = tb1->jmp_next[n1];
631
    }
632
    /* check end of list */
633
    if (tb1 != tb) {
634
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
635
    }
636
}
637

    
638
#endif
639

    
640
/* invalidate one TB */
641
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
642
                             int next_offset)
643
{
644
    TranslationBlock *tb1;
645
    for(;;) {
646
        tb1 = *ptb;
647
        if (tb1 == tb) {
648
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
649
            break;
650
        }
651
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
652
    }
653
}
654

    
655
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
656
{
657
    TranslationBlock *tb1;
658
    unsigned int n1;
659

    
660
    for(;;) {
661
        tb1 = *ptb;
662
        n1 = (long)tb1 & 3;
663
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
664
        if (tb1 == tb) {
665
            *ptb = tb1->page_next[n1];
666
            break;
667
        }
668
        ptb = &tb1->page_next[n1];
669
    }
670
}
671

    
672
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
673
{
674
    TranslationBlock *tb1, **ptb;
675
    unsigned int n1;
676

    
677
    ptb = &tb->jmp_next[n];
678
    tb1 = *ptb;
679
    if (tb1) {
680
        /* find tb(n) in circular list */
681
        for(;;) {
682
            tb1 = *ptb;
683
            n1 = (long)tb1 & 3;
684
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
685
            if (n1 == n && tb1 == tb)
686
                break;
687
            if (n1 == 2) {
688
                ptb = &tb1->jmp_first;
689
            } else {
690
                ptb = &tb1->jmp_next[n1];
691
            }
692
        }
693
        /* now we can suppress tb(n) from the list */
694
        *ptb = tb->jmp_next[n];
695

    
696
        tb->jmp_next[n] = NULL;
697
    }
698
}
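/* Note on the pointer tagging used in the loops above and below (a reading
   aid, not part of the original file): list entries of jmp_first/jmp_next
   carry a tag in the two low bits of the TranslationBlock pointer.  A tag of
   0 or 1 means "this predecessor reaches us through its jmp_next[0] or
   jmp_next[1] slot"; a tag of 2 marks the owning TB itself and therefore the
   end of the circular list (see tb->jmp_first = (TranslationBlock *)((long)tb | 2)
   further down). */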
699

    
700
/* reset the jump entry 'n' of a TB so that it is not chained to
701
   another TB */
702
static inline void tb_reset_jump(TranslationBlock *tb, int n)
703
{
704
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
705
}
706

    
707
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
708
{
709
    CPUState *env;
710
    PageDesc *p;
711
    unsigned int h, n1;
712
    target_phys_addr_t phys_pc;
713
    TranslationBlock *tb1, *tb2;
714

    
715
    /* remove the TB from the hash list */
716
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
717
    h = tb_phys_hash_func(phys_pc);
718
    tb_remove(&tb_phys_hash[h], tb,
719
              offsetof(TranslationBlock, phys_hash_next));
720

    
721
    /* remove the TB from the page list */
722
    if (tb->page_addr[0] != page_addr) {
723
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
724
        tb_page_remove(&p->first_tb, tb);
725
        invalidate_page_bitmap(p);
726
    }
727
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
728
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
729
        tb_page_remove(&p->first_tb, tb);
730
        invalidate_page_bitmap(p);
731
    }
732

    
733
    tb_invalidated_flag = 1;
734

    
735
    /* remove the TB from the hash list */
736
    h = tb_jmp_cache_hash_func(tb->pc);
737
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
738
        if (env->tb_jmp_cache[h] == tb)
739
            env->tb_jmp_cache[h] = NULL;
740
    }
741

    
742
    /* suppress this TB from the two jump lists */
743
    tb_jmp_remove(tb, 0);
744
    tb_jmp_remove(tb, 1);
745

    
746
    /* suppress any remaining jumps to this TB */
747
    tb1 = tb->jmp_first;
748
    for(;;) {
749
        n1 = (long)tb1 & 3;
750
        if (n1 == 2)
751
            break;
752
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
753
        tb2 = tb1->jmp_next[n1];
754
        tb_reset_jump(tb1, n1);
755
        tb1->jmp_next[n1] = NULL;
756
        tb1 = tb2;
757
    }
758
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
759

    
760
    tb_phys_invalidate_count++;
761
}
762

    
763
static inline void set_bits(uint8_t *tab, int start, int len)
764
{
765
    int end, mask, end1;
766

    
767
    end = start + len;
768
    tab += start >> 3;
769
    mask = 0xff << (start & 7);
770
    if ((start & ~7) == (end & ~7)) {
771
        if (start < end) {
772
            mask &= ~(0xff << (end & 7));
773
            *tab |= mask;
774
        }
775
    } else {
776
        *tab++ |= mask;
777
        start = (start + 8) & ~7;
778
        end1 = end & ~7;
779
        while (start < end1) {
780
            *tab++ = 0xff;
781
            start += 8;
782
        }
783
        if (start < end) {
784
            mask = ~(0xff << (end & 7));
785
            *tab |= mask;
786
        }
787
    }
788
}
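/* Worked examples (not part of the original file): set_bits(tab, 10, 6) marks
   bit positions 10..15, i.e. it ORs tab[1] with 0xfc, while set_bits(tab, 3, 2)
   stays inside one byte and ORs tab[0] with 0x18. */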
789

    
790
static void build_page_bitmap(PageDesc *p)
791
{
792
    int n, tb_start, tb_end;
793
    TranslationBlock *tb;
794

    
795
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
796
    if (!p->code_bitmap)
797
        return;
798

    
799
    tb = p->first_tb;
800
    while (tb != NULL) {
801
        n = (long)tb & 3;
802
        tb = (TranslationBlock *)((long)tb & ~3);
803
        /* NOTE: this is subtle as a TB may span two physical pages */
804
        if (n == 0) {
805
            /* NOTE: tb_end may be after the end of the page, but
806
               it is not a problem */
807
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
808
            tb_end = tb_start + tb->size;
809
            if (tb_end > TARGET_PAGE_SIZE)
810
                tb_end = TARGET_PAGE_SIZE;
811
        } else {
812
            tb_start = 0;
813
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
814
        }
815
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
816
        tb = tb->page_next[n];
817
    }
818
}
819

    
820
TranslationBlock *tb_gen_code(CPUState *env,
821
                              target_ulong pc, target_ulong cs_base,
822
                              int flags, int cflags)
823
{
824
    TranslationBlock *tb;
825
    uint8_t *tc_ptr;
826
    target_ulong phys_pc, phys_page2, virt_page2;
827
    int code_gen_size;
828

    
829
    phys_pc = get_phys_addr_code(env, pc);
830
    tb = tb_alloc(pc);
831
    if (!tb) {
832
        /* flush must be done */
833
        tb_flush(env);
834
        /* cannot fail at this point */
835
        tb = tb_alloc(pc);
836
        /* Don't forget to invalidate previous TB info.  */
837
        tb_invalidated_flag = 1;
838
    }
839
    tc_ptr = code_gen_ptr;
840
    tb->tc_ptr = tc_ptr;
841
    tb->cs_base = cs_base;
842
    tb->flags = flags;
843
    tb->cflags = cflags;
844
    cpu_gen_code(env, tb, &code_gen_size);
845
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
846

    
847
    /* check next page if needed */
848
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
849
    phys_page2 = -1;
850
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
851
        phys_page2 = get_phys_addr_code(env, virt_page2);
852
    }
853
    tb_link_phys(tb, phys_pc, phys_page2);
854
    return tb;
855
}
856

    
857
/* invalidate all TBs which intersect with the target physical page
858
   starting in range [start;end[. NOTE: start and end must refer to
859
   the same physical page. 'is_cpu_write_access' should be true if called
860
   from a real cpu write access: the virtual CPU will exit the current
861
   TB if code is modified inside this TB. */
862
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
863
                                   int is_cpu_write_access)
864
{
865
    int n, current_tb_modified, current_tb_not_found, current_flags;
866
    CPUState *env = cpu_single_env;
867
    PageDesc *p;
868
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
869
    target_ulong tb_start, tb_end;
870
    target_ulong current_pc, current_cs_base;
871

    
872
    p = page_find(start >> TARGET_PAGE_BITS);
873
    if (!p)
874
        return;
875
    if (!p->code_bitmap &&
876
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
877
        is_cpu_write_access) {
878
        /* build code bitmap */
879
        build_page_bitmap(p);
880
    }
881

    
882
    /* we remove all the TBs in the range [start, end[ */
883
    /* XXX: see if in some cases it could be faster to invalidate all the code */
884
    current_tb_not_found = is_cpu_write_access;
885
    current_tb_modified = 0;
886
    current_tb = NULL; /* avoid warning */
887
    current_pc = 0; /* avoid warning */
888
    current_cs_base = 0; /* avoid warning */
889
    current_flags = 0; /* avoid warning */
890
    tb = p->first_tb;
891
    while (tb != NULL) {
892
        n = (long)tb & 3;
893
        tb = (TranslationBlock *)((long)tb & ~3);
894
        tb_next = tb->page_next[n];
895
        /* NOTE: this is subtle as a TB may span two physical pages */
896
        if (n == 0) {
897
            /* NOTE: tb_end may be after the end of the page, but
898
               it is not a problem */
899
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
900
            tb_end = tb_start + tb->size;
901
        } else {
902
            tb_start = tb->page_addr[1];
903
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
904
        }
905
        if (!(tb_end <= start || tb_start >= end)) {
906
#ifdef TARGET_HAS_PRECISE_SMC
907
            if (current_tb_not_found) {
908
                current_tb_not_found = 0;
909
                current_tb = NULL;
910
                if (env->mem_io_pc) {
911
                    /* now we have a real cpu fault */
912
                    current_tb = tb_find_pc(env->mem_io_pc);
913
                }
914
            }
915
            if (current_tb == tb &&
916
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
917
                /* If we are modifying the current TB, we must stop
918
                its execution. We could be more precise by checking
919
                that the modification is after the current PC, but it
920
                would require a specialized function to partially
921
                restore the CPU state */
922

    
923
                current_tb_modified = 1;
924
                cpu_restore_state(current_tb, env,
925
                                  env->mem_io_pc, NULL);
926
#if defined(TARGET_I386)
927
                current_flags = env->hflags;
928
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
929
                current_cs_base = (target_ulong)env->segs[R_CS].base;
930
                current_pc = current_cs_base + env->eip;
931
#else
932
#error unsupported CPU
933
#endif
934
            }
935
#endif /* TARGET_HAS_PRECISE_SMC */
936
            /* we need to do that to handle the case where a signal
937
               occurs while doing tb_phys_invalidate() */
938
            saved_tb = NULL;
939
            if (env) {
940
                saved_tb = env->current_tb;
941
                env->current_tb = NULL;
942
            }
943
            tb_phys_invalidate(tb, -1);
944
            if (env) {
945
                env->current_tb = saved_tb;
946
                if (env->interrupt_request && env->current_tb)
947
                    cpu_interrupt(env, env->interrupt_request);
948
            }
949
        }
950
        tb = tb_next;
951
    }
952
#if !defined(CONFIG_USER_ONLY)
953
    /* if no code remaining, no need to continue to use slow writes */
954
    if (!p->first_tb) {
955
        invalidate_page_bitmap(p);
956
        if (is_cpu_write_access) {
957
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
958
        }
959
    }
960
#endif
961
#ifdef TARGET_HAS_PRECISE_SMC
962
    if (current_tb_modified) {
963
        /* we generate a block containing just the instruction
964
           modifying the memory. It will ensure that it cannot modify
965
           itself */
966
        env->current_tb = NULL;
967
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
968
        cpu_resume_from_signal(env, NULL);
969
    }
970
#endif
971
}
972

    
973
/* len must be <= 8 and start must be a multiple of len */
974
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
975
{
976
    PageDesc *p;
977
    int offset, b;
978
#if 0
979
    if (1) {
980
        if (loglevel) {
981
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
982
                   cpu_single_env->mem_io_vaddr, len,
983
                   cpu_single_env->eip,
984
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
985
        }
986
    }
987
#endif
988
    p = page_find(start >> TARGET_PAGE_BITS);
989
    if (!p)
990
        return;
991
    if (p->code_bitmap) {
992
        offset = start & ~TARGET_PAGE_MASK;
993
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
994
        if (b & ((1 << len) - 1))
995
            goto do_invalidate;
996
    } else {
997
    do_invalidate:
998
        tb_invalidate_phys_page_range(start, start + len, 1);
999
    }
1000
}
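/* Worked example (not part of the original file): for a write of len = 4 at
   page offset 10, the fast path above reads code_bitmap[1], shifts it right
   by 2 and tests the low four bits, i.e. bitmap bits 10..13; only if one of
   those offsets is covered by a TB does it fall back to the full
   tb_invalidate_phys_page_range(). */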
1001

    
1002
#if !defined(CONFIG_SOFTMMU)
1003
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1004
                                    unsigned long pc, void *puc)
1005
{
1006
    int n, current_flags, current_tb_modified;
1007
    target_ulong current_pc, current_cs_base;
1008
    PageDesc *p;
1009
    TranslationBlock *tb, *current_tb;
1010
#ifdef TARGET_HAS_PRECISE_SMC
1011
    CPUState *env = cpu_single_env;
1012
#endif
1013

    
1014
    addr &= TARGET_PAGE_MASK;
1015
    p = page_find(addr >> TARGET_PAGE_BITS);
1016
    if (!p)
1017
        return;
1018
    tb = p->first_tb;
1019
    current_tb_modified = 0;
1020
    current_tb = NULL;
1021
    current_pc = 0; /* avoid warning */
1022
    current_cs_base = 0; /* avoid warning */
1023
    current_flags = 0; /* avoid warning */
1024
#ifdef TARGET_HAS_PRECISE_SMC
1025
    if (tb && pc != 0) {
1026
        current_tb = tb_find_pc(pc);
1027
    }
1028
#endif
1029
    while (tb != NULL) {
1030
        n = (long)tb & 3;
1031
        tb = (TranslationBlock *)((long)tb & ~3);
1032
#ifdef TARGET_HAS_PRECISE_SMC
1033
        if (current_tb == tb &&
1034
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1035
                /* If we are modifying the current TB, we must stop
1036
                   its execution. We could be more precise by checking
1037
                   that the modification is after the current PC, but it
1038
                   would require a specialized function to partially
1039
                   restore the CPU state */
1040

    
1041
            current_tb_modified = 1;
1042
            cpu_restore_state(current_tb, env, pc, puc);
1043
#if defined(TARGET_I386)
1044
            current_flags = env->hflags;
1045
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1046
            current_cs_base = (target_ulong)env->segs[R_CS].base;
1047
            current_pc = current_cs_base + env->eip;
1048
#else
1049
#error unsupported CPU
1050
#endif
1051
        }
1052
#endif /* TARGET_HAS_PRECISE_SMC */
1053
        tb_phys_invalidate(tb, addr);
1054
        tb = tb->page_next[n];
1055
    }
1056
    p->first_tb = NULL;
1057
#ifdef TARGET_HAS_PRECISE_SMC
1058
    if (current_tb_modified) {
1059
        /* we generate a block containing just the instruction
1060
           modifying the memory. It will ensure that it cannot modify
1061
           itself */
1062
        env->current_tb = NULL;
1063
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1064
        cpu_resume_from_signal(env, puc);
1065
    }
1066
#endif
1067
}
1068
#endif
1069

    
1070
/* add the tb in the target page and protect it if necessary */
1071
static inline void tb_alloc_page(TranslationBlock *tb,
1072
                                 unsigned int n, target_ulong page_addr)
1073
{
1074
    PageDesc *p;
1075
    TranslationBlock *last_first_tb;
1076

    
1077
    tb->page_addr[n] = page_addr;
1078
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1079
    tb->page_next[n] = p->first_tb;
1080
    last_first_tb = p->first_tb;
1081
    p->first_tb = (TranslationBlock *)((long)tb | n);
1082
    invalidate_page_bitmap(p);
1083

    
1084
#if defined(TARGET_HAS_SMC) || 1
1085

    
1086
#if defined(CONFIG_USER_ONLY)
1087
    if (p->flags & PAGE_WRITE) {
1088
        target_ulong addr;
1089
        PageDesc *p2;
1090
        int prot;
1091

    
1092
        /* force the host page to be non-writable (writes will have a
1093
           page fault + mprotect overhead) */
1094
        page_addr &= qemu_host_page_mask;
1095
        prot = 0;
1096
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1097
            addr += TARGET_PAGE_SIZE) {
1098

    
1099
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1100
            if (!p2)
1101
                continue;
1102
            prot |= p2->flags;
1103
            p2->flags &= ~PAGE_WRITE;
1104
            page_get_flags(addr);
1105
          }
1106
        mprotect(g2h(page_addr), qemu_host_page_size,
1107
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1108
#ifdef DEBUG_TB_INVALIDATE
1109
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1110
               page_addr);
1111
#endif
1112
    }
1113
#else
1114
    /* if some code is already present, then the pages are already
1115
       protected. So we handle the case where only the first TB is
1116
       allocated in a physical page */
1117
    if (!last_first_tb) {
1118
        tlb_protect_code(page_addr);
1119
    }
1120
#endif
1121

    
1122
#endif /* TARGET_HAS_SMC */
1123
}
1124

    
1125
/* Allocate a new translation block. Flush the translation buffer if
1126
   too many translation blocks or too much generated code. */
1127
TranslationBlock *tb_alloc(target_ulong pc)
1128
{
1129
    TranslationBlock *tb;
1130

    
1131
    if (nb_tbs >= code_gen_max_blocks ||
1132
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1133
        return NULL;
1134
    tb = &tbs[nb_tbs++];
1135
    tb->pc = pc;
1136
    tb->cflags = 0;
1137
    return tb;
1138
}
1139

    
1140
void tb_free(TranslationBlock *tb)
1141
{
1142
    /* In practice this is mostly used for single-use temporary TBs.
1143
       Ignore the hard cases and just back up if this TB happens to
1144
       be the last one generated.  */
1145
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1146
        code_gen_ptr = tb->tc_ptr;
1147
        nb_tbs--;
1148
    }
1149
}
1150

    
1151
/* add a new TB and link it to the physical page tables. phys_page2 is
1152
   (-1) to indicate that only one page contains the TB. */
1153
void tb_link_phys(TranslationBlock *tb,
1154
                  target_ulong phys_pc, target_ulong phys_page2)
1155
{
1156
    unsigned int h;
1157
    TranslationBlock **ptb;
1158

    
1159
    /* Grab the mmap lock to stop another thread invalidating this TB
1160
       before we are done.  */
1161
    mmap_lock();
1162
    /* add in the physical hash table */
1163
    h = tb_phys_hash_func(phys_pc);
1164
    ptb = &tb_phys_hash[h];
1165
    tb->phys_hash_next = *ptb;
1166
    *ptb = tb;
1167

    
1168
    /* add in the page list */
1169
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1170
    if (phys_page2 != -1)
1171
        tb_alloc_page(tb, 1, phys_page2);
1172
    else
1173
        tb->page_addr[1] = -1;
1174

    
1175
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1176
    tb->jmp_next[0] = NULL;
1177
    tb->jmp_next[1] = NULL;
1178

    
1179
    /* init original jump addresses */
1180
    if (tb->tb_next_offset[0] != 0xffff)
1181
        tb_reset_jump(tb, 0);
1182
    if (tb->tb_next_offset[1] != 0xffff)
1183
        tb_reset_jump(tb, 1);
1184

    
1185
#ifdef DEBUG_TB_CHECK
1186
    tb_page_check();
1187
#endif
1188
    mmap_unlock();
1189
}
1190

    
1191
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1192
   tb[1].tc_ptr. Return NULL if not found */
1193
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1194
{
1195
    int m_min, m_max, m;
1196
    unsigned long v;
1197
    TranslationBlock *tb;
1198

    
1199
    if (nb_tbs <= 0)
1200
        return NULL;
1201
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1202
        tc_ptr >= (unsigned long)code_gen_ptr)
1203
        return NULL;
1204
    /* binary search (cf Knuth) */
1205
    m_min = 0;
1206
    m_max = nb_tbs - 1;
1207
    while (m_min <= m_max) {
1208
        m = (m_min + m_max) >> 1;
1209
        tb = &tbs[m];
1210
        v = (unsigned long)tb->tc_ptr;
1211
        if (v == tc_ptr)
1212
            return tb;
1213
        else if (tc_ptr < v) {
1214
            m_max = m - 1;
1215
        } else {
1216
            m_min = m + 1;
1217
        }
1218
    }
1219
    return &tbs[m_max];
1220
}
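/* Illustrative use (not part of the original file): a host fault handler can
   map the faulting host PC back to the translated block that produced it and
   then restore the guest CPU state, much like the cpu_restore_state() callers
   elsewhere in this file.  host_pc, env and puc are assumed to be provided by
   the caller. */
#if 0
TranslationBlock *tb = tb_find_pc((unsigned long)host_pc);
if (tb)
    cpu_restore_state(tb, env, (unsigned long)host_pc, puc);
#endif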
1221

    
1222
static void tb_reset_jump_recursive(TranslationBlock *tb);
1223

    
1224
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1225
{
1226
    TranslationBlock *tb1, *tb_next, **ptb;
1227
    unsigned int n1;
1228

    
1229
    tb1 = tb->jmp_next[n];
1230
    if (tb1 != NULL) {
1231
        /* find head of list */
1232
        for(;;) {
1233
            n1 = (long)tb1 & 3;
1234
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1235
            if (n1 == 2)
1236
                break;
1237
            tb1 = tb1->jmp_next[n1];
1238
        }
1239
        /* we are now sure that tb jumps to tb1 */
1240
        tb_next = tb1;
1241

    
1242
        /* remove tb from the jmp_first list */
1243
        ptb = &tb_next->jmp_first;
1244
        for(;;) {
1245
            tb1 = *ptb;
1246
            n1 = (long)tb1 & 3;
1247
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1248
            if (n1 == n && tb1 == tb)
1249
                break;
1250
            ptb = &tb1->jmp_next[n1];
1251
        }
1252
        *ptb = tb->jmp_next[n];
1253
        tb->jmp_next[n] = NULL;
1254

    
1255
        /* suppress the jump to next tb in generated code */
1256
        tb_reset_jump(tb, n);
1257

    
1258
        /* suppress jumps in the tb on which we could have jumped */
1259
        tb_reset_jump_recursive(tb_next);
1260
    }
1261
}
1262

    
1263
static void tb_reset_jump_recursive(TranslationBlock *tb)
1264
{
1265
    tb_reset_jump_recursive2(tb, 0);
1266
    tb_reset_jump_recursive2(tb, 1);
1267
}
1268

    
1269
#if defined(TARGET_HAS_ICE)
1270
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1271
{
1272
    target_phys_addr_t addr;
1273
    target_ulong pd;
1274
    ram_addr_t ram_addr;
1275
    PhysPageDesc *p;
1276

    
1277
    addr = cpu_get_phys_page_debug(env, pc);
1278
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1279
    if (!p) {
1280
        pd = IO_MEM_UNASSIGNED;
1281
    } else {
1282
        pd = p->phys_offset;
1283
    }
1284
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1285
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1286
}
1287
#endif
1288

    
1289
/* Add a watchpoint.  */
1290
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1291
{
1292
    int i;
1293

    
1294
    for (i = 0; i < env->nb_watchpoints; i++) {
1295
        if (addr == env->watchpoint[i].vaddr)
1296
            return 0;
1297
    }
1298
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1299
        return -1;
1300

    
1301
    i = env->nb_watchpoints++;
1302
    env->watchpoint[i].vaddr = addr;
1303
    env->watchpoint[i].type = type;
1304
    tlb_flush_page(env, addr);
1305
    /* FIXME: This flush is needed because of the hack to make memory ops
1306
       terminate the TB.  It can be removed once the proper IO trap and
1307
       re-execute bits are in.  */
1308
    tb_flush(env);
1309
    return i;
1310
}
1311

    
1312
/* Remove a watchpoint.  */
1313
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1314
{
1315
    int i;
1316

    
1317
    for (i = 0; i < env->nb_watchpoints; i++) {
1318
        if (addr == env->watchpoint[i].vaddr) {
1319
            env->nb_watchpoints--;
1320
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1321
            tlb_flush_page(env, addr);
1322
            return 0;
1323
        }
1324
    }
1325
    return -1;
1326
}
1327

    
1328
/* Remove all watchpoints. */
1329
void cpu_watchpoint_remove_all(CPUState *env) {
1330
    int i;
1331

    
1332
    for (i = 0; i < env->nb_watchpoints; i++) {
1333
        tlb_flush_page(env, env->watchpoint[i].vaddr);
1334
    }
1335
    env->nb_watchpoints = 0;
1336
}
1337

    
1338
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1339
   breakpoint is reached */
1340
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1341
{
1342
#if defined(TARGET_HAS_ICE)
1343
    int i;
1344

    
1345
    for(i = 0; i < env->nb_breakpoints; i++) {
1346
        if (env->breakpoints[i] == pc)
1347
            return 0;
1348
    }
1349

    
1350
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1351
        return -1;
1352
    env->breakpoints[env->nb_breakpoints++] = pc;
1353

    
1354
    breakpoint_invalidate(env, pc);
1355
    return 0;
1356
#else
1357
    return -1;
1358
#endif
1359
}
1360

    
1361
/* remove all breakpoints */
1362
void cpu_breakpoint_remove_all(CPUState *env) {
1363
#if defined(TARGET_HAS_ICE)
1364
    int i;
1365
    for(i = 0; i < env->nb_breakpoints; i++) {
1366
        breakpoint_invalidate(env, env->breakpoints[i]);
1367
    }
1368
    env->nb_breakpoints = 0;
1369
#endif
1370
}
1371

    
1372
/* remove a breakpoint */
1373
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1374
{
1375
#if defined(TARGET_HAS_ICE)
1376
    int i;
1377
    for(i = 0; i < env->nb_breakpoints; i++) {
1378
        if (env->breakpoints[i] == pc)
1379
            goto found;
1380
    }
1381
    return -1;
1382
 found:
1383
    env->nb_breakpoints--;
1384
    if (i < env->nb_breakpoints)
1385
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1386

    
1387
    breakpoint_invalidate(env, pc);
1388
    return 0;
1389
#else
1390
    return -1;
1391
#endif
1392
}
1393

    
1394
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1395
   CPU loop after each instruction */
1396
void cpu_single_step(CPUState *env, int enabled)
1397
{
1398
#if defined(TARGET_HAS_ICE)
1399
    if (env->singlestep_enabled != enabled) {
1400
        env->singlestep_enabled = enabled;
1401
        /* must flush all the translated code to avoid inconsistencies */
1402
        /* XXX: only flush what is necessary */
1403
        tb_flush(env);
1404
    }
1405
#endif
1406
}
1407

    
1408
/* enable or disable low-level logging */
1409
void cpu_set_log(int log_flags)
1410
{
1411
    loglevel = log_flags;
1412
    if (loglevel && !logfile) {
1413
        logfile = fopen(logfilename, log_append ? "a" : "w");
1414
        if (!logfile) {
1415
            perror(logfilename);
1416
            _exit(1);
1417
        }
1418
#if !defined(CONFIG_SOFTMMU)
1419
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1420
        {
1421
            static char logfile_buf[4096];
1422
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1423
        }
1424
#else
1425
        setvbuf(logfile, NULL, _IOLBF, 0);
1426
#endif
1427
        log_append = 1;
1428
    }
1429
    if (!loglevel && logfile) {
1430
        fclose(logfile);
1431
        logfile = NULL;
1432
    }
1433
}
1434

    
1435
void cpu_set_log_filename(const char *filename)
1436
{
1437
    logfilename = strdup(filename);
1438
    if (logfile) {
1439
        fclose(logfile);
1440
        logfile = NULL;
1441
    }
1442
    cpu_set_log(loglevel);
1443
}
1444

    
1445
/* mask must never be zero, except for A20 change call */
1446
void cpu_interrupt(CPUState *env, int mask)
1447
{
1448
#if !defined(USE_NPTL)
1449
    TranslationBlock *tb;
1450
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1451
#endif
1452
    int old_mask;
1453

    
1454
    old_mask = env->interrupt_request;
1455
    /* FIXME: This is probably not threadsafe.  A different thread could
1456
       be in the middle of a read-modify-write operation.  */
1457
    env->interrupt_request |= mask;
1458
#if defined(USE_NPTL)
1459
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1460
       problem and hope the cpu will stop of its own accord.  For userspace
1461
       emulation this often isn't actually as bad as it sounds.  Often
1462
       signals are used primarily to interrupt blocking syscalls.  */
1463
#else
1464
    if (use_icount) {
1465
        env->icount_decr.u16.high = 0xffff;
1466
#ifndef CONFIG_USER_ONLY
1467
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1468
           an async event happened and we need to process it.  */
1469
        if (!can_do_io(env)
1470
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1471
            cpu_abort(env, "Raised interrupt while not in I/O function");
1472
        }
1473
#endif
1474
    } else {
1475
        tb = env->current_tb;
1476
        /* if the cpu is currently executing code, we must unlink it and
1477
           all the potentially executing TBs */
1478
        if (tb && !testandset(&interrupt_lock)) {
1479
            env->current_tb = NULL;
1480
            tb_reset_jump_recursive(tb);
1481
            resetlock(&interrupt_lock);
1482
        }
1483
    }
1484
#endif
1485
}
1486

    
1487
void cpu_reset_interrupt(CPUState *env, int mask)
1488
{
1489
    env->interrupt_request &= ~mask;
1490
}
1491

    
1492
CPULogItem cpu_log_items[] = {
1493
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1494
      "show generated host assembly code for each compiled TB" },
1495
    { CPU_LOG_TB_IN_ASM, "in_asm",
1496
      "show target assembly code for each compiled TB" },
1497
    { CPU_LOG_TB_OP, "op",
1498
      "show micro ops for each compiled TB" },
1499
    { CPU_LOG_TB_OP_OPT, "op_opt",
1500
      "show micro ops "
1501
#ifdef TARGET_I386
1502
      "before eflags optimization and "
1503
#endif
1504
      "after liveness analysis" },
1505
    { CPU_LOG_INT, "int",
1506
      "show interrupts/exceptions in short format" },
1507
    { CPU_LOG_EXEC, "exec",
1508
      "show trace before each executed TB (lots of logs)" },
1509
    { CPU_LOG_TB_CPU, "cpu",
1510
      "show CPU state before block translation" },
1511
#ifdef TARGET_I386
1512
    { CPU_LOG_PCALL, "pcall",
1513
      "show protected mode far calls/returns/exceptions" },
1514
#endif
1515
#ifdef DEBUG_IOPORT
1516
    { CPU_LOG_IOPORT, "ioport",
1517
      "show all i/o ports accesses" },
1518
#endif
1519
    { 0, NULL, NULL },
1520
};
1521

    
1522
static int cmp1(const char *s1, int n, const char *s2)
1523
{
1524
    if (strlen(s2) != n)
1525
        return 0;
1526
    return memcmp(s1, s2, n) == 0;
1527
}
1528

    
1529
/* takes a comma-separated list of log masks. Returns 0 on error. */
1530
int cpu_str_to_log_mask(const char *str)
1531
{
1532
    CPULogItem *item;
1533
    int mask;
1534
    const char *p, *p1;
1535

    
1536
    p = str;
1537
    mask = 0;
1538
    for(;;) {
1539
        p1 = strchr(p, ',');
1540
        if (!p1)
1541
            p1 = p + strlen(p);
1542
        if (cmp1(p, p1 - p, "all")) {
1543
            for(item = cpu_log_items; item->mask != 0; item++) {
1544
                mask |= item->mask;
1545
            }
1546
        } else {
1547
            for(item = cpu_log_items; item->mask != 0; item++) {
1548
                if (cmp1(p, p1 - p, item->name))
1549
                    goto found;
1550
            }
1551
            return 0;
1552
        }
1553
    found:
1554
        mask |= item->mask;
1555
        if (*p1 != ',')
1556
            break;
1557
        p = p1 + 1;
1558
    }
1559
    return mask;
1560
}
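/* Usage sketch (not part of the original file): front-ends typically combine
   these helpers when handling a "-d" style option.  The string literal is
   only an example. */
#if 0
int mask = cpu_str_to_log_mask("in_asm,exec");
if (mask)
    cpu_set_log(mask);    /* opens the log file and sets loglevel */
#endif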
1561

    
1562
void cpu_abort(CPUState *env, const char *fmt, ...)
1563
{
1564
    va_list ap;
1565
    va_list ap2;
1566

    
1567
    va_start(ap, fmt);
1568
    va_copy(ap2, ap);
1569
    fprintf(stderr, "qemu: fatal: ");
1570
    vfprintf(stderr, fmt, ap);
1571
    fprintf(stderr, "\n");
1572
#ifdef TARGET_I386
1573
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1574
#else
1575
    cpu_dump_state(env, stderr, fprintf, 0);
1576
#endif
1577
    if (logfile) {
1578
        fprintf(logfile, "qemu: fatal: ");
1579
        vfprintf(logfile, fmt, ap2);
1580
        fprintf(logfile, "\n");
1581
#ifdef TARGET_I386
1582
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1583
#else
1584
        cpu_dump_state(env, logfile, fprintf, 0);
1585
#endif
1586
        fflush(logfile);
1587
        fclose(logfile);
1588
    }
1589
    va_end(ap2);
1590
    va_end(ap);
1591
    abort();
1592
}
1593

    
1594
CPUState *cpu_copy(CPUState *env)
1595
{
1596
    CPUState *new_env = cpu_init(env->cpu_model_str);
1597
    /* preserve chaining and index */
1598
    CPUState *next_cpu = new_env->next_cpu;
1599
    int cpu_index = new_env->cpu_index;
1600
    memcpy(new_env, env, sizeof(CPUState));
1601
    new_env->next_cpu = next_cpu;
1602
    new_env->cpu_index = cpu_index;
1603
    return new_env;
1604
}
1605

    
1606
#if !defined(CONFIG_USER_ONLY)
1607

    
1608
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1609
{
1610
    unsigned int i;
1611

    
1612
    /* Discard jump cache entries for any tb which might potentially
1613
       overlap the flushed page.  */
1614
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1615
    memset (&env->tb_jmp_cache[i], 0, 
1616
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1617

    
1618
    i = tb_jmp_cache_hash_page(addr);
1619
    memset (&env->tb_jmp_cache[i], 0, 
1620
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1621
}
1622

    
1623
/* NOTE: if flush_global is true, also flush global entries (not
1624
   implemented yet) */
1625
void tlb_flush(CPUState *env, int flush_global)
1626
{
1627
    int i;
1628

    
1629
#if defined(DEBUG_TLB)
1630
    printf("tlb_flush:\n");
1631
#endif
1632
    /* must reset current TB so that interrupts cannot modify the
1633
       links while we are modifying them */
1634
    env->current_tb = NULL;
1635

    
1636
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1637
        env->tlb_table[0][i].addr_read = -1;
1638
        env->tlb_table[0][i].addr_write = -1;
1639
        env->tlb_table[0][i].addr_code = -1;
1640
        env->tlb_table[1][i].addr_read = -1;
1641
        env->tlb_table[1][i].addr_write = -1;
1642
        env->tlb_table[1][i].addr_code = -1;
1643
#if (NB_MMU_MODES >= 3)
1644
        env->tlb_table[2][i].addr_read = -1;
1645
        env->tlb_table[2][i].addr_write = -1;
1646
        env->tlb_table[2][i].addr_code = -1;
1647
#if (NB_MMU_MODES == 4)
1648
        env->tlb_table[3][i].addr_read = -1;
1649
        env->tlb_table[3][i].addr_write = -1;
1650
        env->tlb_table[3][i].addr_code = -1;
1651
#endif
1652
#endif
1653
    }
1654

    
1655
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1656

    
1657
#ifdef USE_KQEMU
1658
    if (env->kqemu_enabled) {
1659
        kqemu_flush(env, flush_global);
1660
    }
1661
#endif
1662
    tlb_flush_count++;
1663
}
1664

    
1665
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

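/* The dirty-tracking helpers below work in two steps:
   cpu_physical_memory_reset_dirty() clears the requested dirty flags for a
   physical range and then rewrites every CPU's TLB write entries that point
   into that range with TLB_NOTDIRTY, so the next guest write to those pages
   is trapped and the flags can be raised again. */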
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

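/* Check that the guest range [start, start + len) has at least the
   protections requested in 'flags'; returns 0 on success, -1 otherwise.
   Pages that were made read-only only because they contain translated
   code are unprotected on the fly via page_unprotect(). */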
int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
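/* Helper for cpu_register_physical_memory(): computes the offsets
   (start_addr2/end_addr2) of the registered region inside the target page
   containing 'addr' and sets need_subpage when the region covers only part
   of that page, in which case a subpage_t dispatcher is required. */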
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

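/* RAM allocation is a simple bump allocator over the preallocated
   phys_ram area; freeing is not implemented (qemu_ram_free() is a no-op). */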
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

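/* "Not dirty" RAM write handlers: writes to RAM pages that still contain
   translated code are routed here so the overlapping TBs can be invalidated
   and the dirty flags updated.  Once the page no longer holds translated
   code (dirty_flags == 0xff), tlb_set_dirty() restores the fast write path
   for the current virtual page. */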
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
                && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

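/* Subpage dispatch: when several memory regions share a single target page,
   the page is registered as IO_MEM_SUBPAGE and the helpers below forward
   each access to the handler recorded for that byte offset within the page;
   'len' is 0/1/2 for byte/word/long accesses. */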
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); (-1) is
   returned on error. */
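/* Typical usage (illustrative sketch only; my_read_fns, my_write_fns and
   'opaque' stand for a device's own handler tables and state, they are not
   names defined in this file):
       io = cpu_register_io_memory(0, my_read_fns, my_write_fns, opaque);
       cpu_register_physical_memory(base, size, io);
*/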
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
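/* System emulation version: the access is resolved through the physical
   page table; I/O pages go through the registered io_mem handlers, while
   writes that land in RAM invalidate any translated code in the affected
   range and update the dirty flags. */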
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}


/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
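/* 64 bit loads from I/O regions are split into two 32 bit accesses and the
   halves combined according to the target's endianness; RAM is read
   directly through ldq_p(). */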
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

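/* Instantiate the code-access variants of the softmmu load helpers
   (MMUSUFFIX _cmmu, one set per access size via SHIFT); these are the
   helpers the translator uses to fetch target instructions through the
   software MMU. */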
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif