root / exec.c @ 7ccfb2eb

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#include "hw/hw.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#endif
44

    
45
//#define DEBUG_TB_INVALIDATE
46
//#define DEBUG_FLUSH
47
//#define DEBUG_TLB
48
//#define DEBUG_UNASSIGNED
49

    
50
/* make various TB consistency checks */
51
//#define DEBUG_TB_CHECK
52
//#define DEBUG_TLB_CHECK
53

    
54
//#define DEBUG_IOPORT
55
//#define DEBUG_SUBPAGE
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* TB consistency checks only implemented for usermode emulation.  */
59
#undef DEBUG_TB_CHECK
60
#endif
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#define MMAP_AREA_START        0x00000000
65
#define MMAP_AREA_END          0xa8000000
66

    
67
#if defined(TARGET_SPARC64)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 41
69
#elif defined(TARGET_SPARC)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 36
71
#elif defined(TARGET_ALPHA)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#define TARGET_VIRT_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_PPC64)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 36
80
#else
81
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
82
#define TARGET_PHYS_ADDR_SPACE_BITS 32
83
#endif
84

    
85
TranslationBlock *tbs;
86
int code_gen_max_blocks;
87
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
88
int nb_tbs;
89
/* any access to the tbs or the page table must use this lock */
90
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
91

    
92
#if defined(__arm__) || defined(__sparc_v9__)
93
/* The prologue must be reachable with a direct jump. ARM and Sparc64
94
 have limited branch ranges (possibly also PPC) so place it in a
95
 section close to the code segment. */
96
#define code_gen_section                                \
97
    __attribute__((__section__(".gen_code")))           \
98
    __attribute__((aligned (32)))
99
#else
100
#define code_gen_section                                \
101
    __attribute__((aligned (32)))
102
#endif
103

    
104
uint8_t code_gen_prologue[1024] code_gen_section;
105
uint8_t *code_gen_buffer;
106
unsigned long code_gen_buffer_size;
107
/* threshold to flush the translated code buffer */
108
unsigned long code_gen_buffer_max_size; 
109
uint8_t *code_gen_ptr;
110

    
111
#if !defined(CONFIG_USER_ONLY)
112
ram_addr_t phys_ram_size;
113
int phys_ram_fd;
114
uint8_t *phys_ram_base;
115
uint8_t *phys_ram_dirty;
116
static ram_addr_t phys_ram_alloc_offset = 0;
117
#endif
118

    
119
CPUState *first_cpu;
120
/* current CPU in the current thread. It is only valid inside
121
   cpu_exec() */
122
CPUState *cpu_single_env;
123
/* 0 = Do not count executed instructions.
124
   1 = Precise instruction counting.
125
   2 = Adaptive rate instruction counting.  */
126
int use_icount = 0;
127
/* Current instruction counter.  While executing translated code this may
128
   include some instructions that have not yet been executed.  */
129
int64_t qemu_icount;
130

    
131
typedef struct PageDesc {
132
    /* list of TBs intersecting this ram page */
133
    TranslationBlock *first_tb;
134
    /* in order to optimize self modifying code, we count the number
135
       of lookups we do to a given page to use a bitmap */
136
    unsigned int code_write_count;
137
    uint8_t *code_bitmap;
138
#if defined(CONFIG_USER_ONLY)
139
    unsigned long flags;
140
#endif
141
} PageDesc;
142

    
143
typedef struct PhysPageDesc {
144
    /* offset in host memory of the page + io_index in the low bits */
145
    ram_addr_t phys_offset;
146
} PhysPageDesc;
147

    
148
#define L2_BITS 10
149
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150
/* XXX: this is a temporary hack for alpha target.
151
 *      In the future, this is to be replaced by a multi-level table
152
 *      to actually be able to handle the complete 64-bit address space.
153
 */
154
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
155
#else
156
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
157
#endif
158

    
159
#define L1_SIZE (1 << L1_BITS)
160
#define L2_SIZE (1 << L2_BITS)
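/* a guest page index is split into an L1 index (index >> L2_BITS) and an
   L2 index (index & (L2_SIZE - 1)); see page_find() below */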
161

    
162
unsigned long qemu_real_host_page_size;
163
unsigned long qemu_host_page_bits;
164
unsigned long qemu_host_page_size;
165
unsigned long qemu_host_page_mask;
166

    
167
/* XXX: for system emulation, it could just be an array */
168
static PageDesc *l1_map[L1_SIZE];
169
PhysPageDesc **l1_phys_map;
170

    
171
#if !defined(CONFIG_USER_ONLY)
172
static void io_mem_init(void);
173

    
174
/* io memory support */
175
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
177
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
178
static int io_mem_nb;
179
static int io_mem_watch;
180
#endif
181

    
182
/* log support */
183
const char *logfilename = "/tmp/qemu.log";
184
FILE *logfile;
185
int loglevel;
186
static int log_append = 0;
187

    
188
/* statistics */
189
static int tlb_flush_count;
190
static int tb_flush_count;
191
static int tb_phys_invalidate_count;
192

    
193
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194
typedef struct subpage_t {
195
    target_phys_addr_t base;
196
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198
    void *opaque[TARGET_PAGE_SIZE][2][4];
199
} subpage_t;
200

    
201
#ifdef _WIN32
202
static void map_exec(void *addr, long size)
203
{
204
    DWORD old_protect;
205
    VirtualProtect(addr, size,
206
                   PAGE_EXECUTE_READWRITE, &old_protect);
207
    
208
}
209
#else
210
static void map_exec(void *addr, long size)
211
{
212
    unsigned long start, end, page_size;
213
    
214
    page_size = getpagesize();
215
    start = (unsigned long)addr;
216
    start &= ~(page_size - 1);
217
    
218
    end = (unsigned long)addr + size;
219
    end += page_size - 1;
220
    end &= ~(page_size - 1);
221
    
222
    mprotect((void *)start, end - start,
223
             PROT_READ | PROT_WRITE | PROT_EXEC);
224
}
225
#endif
226

    
227
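/* initialize the host page size globals and allocate the physical page
   map (l1_phys_map). For user-mode emulation on POSIX hosts, also mark
   the ranges already mapped by the host (from /proc/self/maps) as
   PAGE_RESERVED. */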
static void page_init(void)
228
{
229
    /* NOTE: we can always suppose that qemu_host_page_size >=
230
       TARGET_PAGE_SIZE */
231
#ifdef _WIN32
232
    {
233
        SYSTEM_INFO system_info;
234
        DWORD old_protect;
235

    
236
        GetSystemInfo(&system_info);
237
        qemu_real_host_page_size = system_info.dwPageSize;
238
    }
239
#else
240
    qemu_real_host_page_size = getpagesize();
241
#endif
242
    if (qemu_host_page_size == 0)
243
        qemu_host_page_size = qemu_real_host_page_size;
244
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
245
        qemu_host_page_size = TARGET_PAGE_SIZE;
246
    qemu_host_page_bits = 0;
247
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248
        qemu_host_page_bits++;
249
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
250
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
252

    
253
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
254
    {
255
        long long startaddr, endaddr;
256
        FILE *f;
257
        int n;
258

    
259
        mmap_lock();
260
        last_brk = (unsigned long)sbrk(0);
261
        f = fopen("/proc/self/maps", "r");
262
        if (f) {
263
            do {
264
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
265
                if (n == 2) {
266
                    startaddr = MIN(startaddr,
267
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268
                    endaddr = MIN(endaddr,
269
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
271
                                   TARGET_PAGE_ALIGN(endaddr),
272
                                   PAGE_RESERVED); 
273
                }
274
            } while (!feof(f));
275
            fclose(f);
276
        }
277
        mmap_unlock();
278
    }
279
#endif
280
}
281

    
282
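/* return the PageDesc for a guest page index, allocating the second
   level table on demand (mmap'd in user mode so that qemu_malloc is not
   re-entered) */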
static inline PageDesc *page_find_alloc(target_ulong index)
283
{
284
    PageDesc **lp, *p;
285

    
286
#if TARGET_LONG_BITS > 32
287
    /* Host memory outside guest VM.  For 32-bit targets we have already
288
       excluded high addresses.  */
289
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
290
        return NULL;
291
#endif
292
    lp = &l1_map[index >> L2_BITS];
293
    p = *lp;
294
    if (!p) {
295
        /* allocate if not found */
296
#if defined(CONFIG_USER_ONLY)
297
        unsigned long addr;
298
        size_t len = sizeof(PageDesc) * L2_SIZE;
299
        /* Don't use qemu_malloc because it may recurse.  */
300
        p = mmap(0, len, PROT_READ | PROT_WRITE,
301
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
302
        *lp = p;
303
        addr = h2g(p);
304
        if (addr == (target_ulong)addr) {
305
            page_set_flags(addr & TARGET_PAGE_MASK,
306
                           TARGET_PAGE_ALIGN(addr + len),
307
                           PAGE_RESERVED); 
308
        }
309
#else
310
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
311
        *lp = p;
312
#endif
313
    }
314
    return p + (index & (L2_SIZE - 1));
315
}
316

    
317
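/* same as page_find_alloc() but never allocates: returns 0 if the
   second level table does not exist */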
static inline PageDesc *page_find(target_ulong index)
318
{
319
    PageDesc *p;
320

    
321
    p = l1_map[index >> L2_BITS];
322
    if (!p)
323
        return 0;
324
    return p + (index & (L2_SIZE - 1));
325
}
326

    
327
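/* return the PhysPageDesc for a physical page index. If 'alloc' is set,
   missing intermediate and leaf tables are created and new entries are
   initialized to IO_MEM_UNASSIGNED; otherwise NULL is returned for
   unmapped pages. */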
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
328
{
329
    void **lp, **p;
330
    PhysPageDesc *pd;
331

    
332
    p = (void **)l1_phys_map;
333
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
334

    
335
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
336
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
337
#endif
338
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
339
    p = *lp;
340
    if (!p) {
341
        /* allocate if not found */
342
        if (!alloc)
343
            return NULL;
344
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
345
        memset(p, 0, sizeof(void *) * L1_SIZE);
346
        *lp = p;
347
    }
348
#endif
349
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
350
    pd = *lp;
351
    if (!pd) {
352
        int i;
353
        /* allocate if not found */
354
        if (!alloc)
355
            return NULL;
356
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
357
        *lp = pd;
358
        for (i = 0; i < L2_SIZE; i++)
359
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
360
    }
361
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
362
}
363

    
364
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
365
{
366
    return phys_page_find_alloc(index, 0);
367
}
368

    
369
#if !defined(CONFIG_USER_ONLY)
370
static void tlb_protect_code(ram_addr_t ram_addr);
371
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
372
                                    target_ulong vaddr);
373
#define mmap_lock() do { } while(0)
374
#define mmap_unlock() do { } while(0)
375
#endif
376

    
377
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
378

    
379
#if defined(CONFIG_USER_ONLY)
380
/* Currently it is not recommended to allocate big chunks of data in
381
   user mode. It will change when a dedicated libc is used */
382
#define USE_STATIC_CODE_GEN_BUFFER
383
#endif
384

    
385
#ifdef USE_STATIC_CODE_GEN_BUFFER
386
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
387
#endif
388

    
389
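/* allocate the buffer holding translated code: either the static buffer
   or an anonymous executable mapping whose placement honours host
   specific branch range constraints. Also sizes the TB descriptor
   array. */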
static void code_gen_alloc(unsigned long tb_size)
390
{
391
#ifdef USE_STATIC_CODE_GEN_BUFFER
392
    code_gen_buffer = static_code_gen_buffer;
393
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
394
    map_exec(code_gen_buffer, code_gen_buffer_size);
395
#else
396
    code_gen_buffer_size = tb_size;
397
    if (code_gen_buffer_size == 0) {
398
#if defined(CONFIG_USER_ONLY)
399
        /* in user mode, phys_ram_size is not meaningful */
400
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
401
#else
402
        /* XXX: needs adjustments */
403
        code_gen_buffer_size = (int)(phys_ram_size / 4);
404
#endif
405
    }
406
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
407
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
408
    /* The code gen buffer location may have constraints depending on
409
       the host cpu and OS */
410
#if defined(__linux__) 
411
    {
412
        int flags;
413
        void *start = NULL;
414

    
415
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
416
#if defined(__x86_64__)
417
        flags |= MAP_32BIT;
418
        /* Cannot map more than that */
419
        if (code_gen_buffer_size > (800 * 1024 * 1024))
420
            code_gen_buffer_size = (800 * 1024 * 1024);
421
#elif defined(__sparc_v9__)
422
        // Map the buffer below 2G, so we can use direct calls and branches
423
        flags |= MAP_FIXED;
424
        start = (void *) 0x60000000UL;
425
        if (code_gen_buffer_size > (512 * 1024 * 1024))
426
            code_gen_buffer_size = (512 * 1024 * 1024);
427
#endif
428
        code_gen_buffer = mmap(start, code_gen_buffer_size,
429
                               PROT_WRITE | PROT_READ | PROT_EXEC,
430
                               flags, -1, 0);
431
        if (code_gen_buffer == MAP_FAILED) {
432
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
433
            exit(1);
434
        }
435
    }
436
#else
437
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
438
    if (!code_gen_buffer) {
439
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
440
        exit(1);
441
    }
442
    map_exec(code_gen_buffer, code_gen_buffer_size);
443
#endif
444
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
445
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
446
    code_gen_buffer_max_size = code_gen_buffer_size - 
447
        code_gen_max_block_size();
448
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
449
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
450
}
451

    
452
/* Must be called before using the QEMU cpus. 'tb_size' is the size
453
   (in bytes) allocated to the translation buffer. Zero means default
454
   size. */
455
void cpu_exec_init_all(unsigned long tb_size)
456
{
457
    cpu_gen_init();
458
    code_gen_alloc(tb_size);
459
    code_gen_ptr = code_gen_buffer;
460
    page_init();
461
#if !defined(CONFIG_USER_ONLY)
462
    io_mem_init();
463
#endif
464
}
465

    
466
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
467

    
468
#define CPU_COMMON_SAVE_VERSION 1
469

    
470
static void cpu_common_save(QEMUFile *f, void *opaque)
471
{
472
    CPUState *env = opaque;
473

    
474
    qemu_put_be32s(f, &env->halted);
475
    qemu_put_be32s(f, &env->interrupt_request);
476
}
477

    
478
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
479
{
480
    CPUState *env = opaque;
481

    
482
    if (version_id != CPU_COMMON_SAVE_VERSION)
483
        return -EINVAL;
484

    
485
    qemu_get_be32s(f, &env->halted);
486
    qemu_get_be32s(f, &env->interrupt_request);
487
    tlb_flush(env, 1);
488

    
489
    return 0;
490
}
491
#endif
492

    
493
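/* append 'env' to the global CPU list, assign it the next cpu_index and,
   in system emulation, register the common and per-target savevm
   handlers */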
void cpu_exec_init(CPUState *env)
494
{
495
    CPUState **penv;
496
    int cpu_index;
497

    
498
    env->next_cpu = NULL;
499
    penv = &first_cpu;
500
    cpu_index = 0;
501
    while (*penv != NULL) {
502
        penv = (CPUState **)&(*penv)->next_cpu;
503
        cpu_index++;
504
    }
505
    env->cpu_index = cpu_index;
506
    env->nb_watchpoints = 0;
507
    *penv = env;
508
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
509
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
510
                    cpu_common_save, cpu_common_load, env);
511
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
512
                    cpu_save, cpu_load, env);
513
#endif
514
}
515

    
516
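/* free the self-modifying-code bitmap of a page and reset its write
   counter */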
static inline void invalidate_page_bitmap(PageDesc *p)
517
{
518
    if (p->code_bitmap) {
519
        qemu_free(p->code_bitmap);
520
        p->code_bitmap = NULL;
521
    }
522
    p->code_write_count = 0;
523
}
524

    
525
/* set to NULL all the 'first_tb' fields in all PageDescs */
526
static void page_flush_tb(void)
527
{
528
    int i, j;
529
    PageDesc *p;
530

    
531
    for(i = 0; i < L1_SIZE; i++) {
532
        p = l1_map[i];
533
        if (p) {
534
            for(j = 0; j < L2_SIZE; j++) {
535
                p->first_tb = NULL;
536
                invalidate_page_bitmap(p);
537
                p++;
538
            }
539
        }
540
    }
541
}
542

    
543
/* flush all the translation blocks */
544
/* XXX: tb_flush is currently not thread safe */
545
void tb_flush(CPUState *env1)
546
{
547
    CPUState *env;
548
#if defined(DEBUG_FLUSH)
549
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
550
           (unsigned long)(code_gen_ptr - code_gen_buffer),
551
           nb_tbs, nb_tbs > 0 ?
552
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
553
#endif
554
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
555
        cpu_abort(env1, "Internal error: code buffer overflow\n");
556

    
557
    nb_tbs = 0;
558

    
559
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
560
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
561
    }
562

    
563
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
564
    page_flush_tb();
565

    
566
    code_gen_ptr = code_gen_buffer;
567
    /* XXX: flush processor icache at this point if cache flush is
568
       expensive */
569
    tb_flush_count++;
570
}
571

    
572
#ifdef DEBUG_TB_CHECK
573

    
574
static void tb_invalidate_check(target_ulong address)
575
{
576
    TranslationBlock *tb;
577
    int i;
578
    address &= TARGET_PAGE_MASK;
579
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
580
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
581
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
582
                  address >= tb->pc + tb->size)) {
583
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
584
                       address, (long)tb->pc, tb->size);
585
            }
586
        }
587
    }
588
}
589

    
590
/* verify that all the pages have correct rights for code */
591
static void tb_page_check(void)
592
{
593
    TranslationBlock *tb;
594
    int i, flags1, flags2;
595

    
596
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
597
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
598
            flags1 = page_get_flags(tb->pc);
599
            flags2 = page_get_flags(tb->pc + tb->size - 1);
600
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
601
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
602
                       (long)tb->pc, tb->size, flags1, flags2);
603
            }
604
        }
605
    }
606
}
607

    
608
void tb_jmp_check(TranslationBlock *tb)
609
{
610
    TranslationBlock *tb1;
611
    unsigned int n1;
612

    
613
    /* suppress any remaining jumps to this TB */
614
    tb1 = tb->jmp_first;
615
    for(;;) {
616
        n1 = (long)tb1 & 3;
617
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
618
        if (n1 == 2)
619
            break;
620
        tb1 = tb1->jmp_next[n1];
621
    }
622
    /* check end of list */
623
    if (tb1 != tb) {
624
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
625
    }
626
}
627

    
628
#endif
629

    
630
/* invalidate one TB */
631
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
632
                             int next_offset)
633
{
634
    TranslationBlock *tb1;
635
    for(;;) {
636
        tb1 = *ptb;
637
        if (tb1 == tb) {
638
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
639
            break;
640
        }
641
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
642
    }
643
}
644

    
645
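/* unlink 'tb' from a page's TB list; the low two bits of each list
   pointer encode which of the TB's pages the link belongs to */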
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
646
{
647
    TranslationBlock *tb1;
648
    unsigned int n1;
649

    
650
    for(;;) {
651
        tb1 = *ptb;
652
        n1 = (long)tb1 & 3;
653
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
654
        if (tb1 == tb) {
655
            *ptb = tb1->page_next[n1];
656
            break;
657
        }
658
        ptb = &tb1->page_next[n1];
659
    }
660
}
661

    
662
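/* remove 'tb' from the circular list of incoming jumps of the TB
   targeted by its jump 'n' */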
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
663
{
664
    TranslationBlock *tb1, **ptb;
665
    unsigned int n1;
666

    
667
    ptb = &tb->jmp_next[n];
668
    tb1 = *ptb;
669
    if (tb1) {
670
        /* find tb(n) in circular list */
671
        for(;;) {
672
            tb1 = *ptb;
673
            n1 = (long)tb1 & 3;
674
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
675
            if (n1 == n && tb1 == tb)
676
                break;
677
            if (n1 == 2) {
678
                ptb = &tb1->jmp_first;
679
            } else {
680
                ptb = &tb1->jmp_next[n1];
681
            }
682
        }
683
        /* now we can suppress tb(n) from the list */
684
        *ptb = tb->jmp_next[n];
685

    
686
        tb->jmp_next[n] = NULL;
687
    }
688
}
689

    
690
/* reset the jump entry 'n' of a TB so that it is not chained to
691
   another TB */
692
static inline void tb_reset_jump(TranslationBlock *tb, int n)
693
{
694
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
695
}
696

    
697
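/* invalidate one TB: remove it from the physical hash table and the page
   lists, flush it from the per-CPU tb_jmp_cache, unlink its outgoing
   jumps and reset the jumps of any TB chained to it */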
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
698
{
699
    CPUState *env;
700
    PageDesc *p;
701
    unsigned int h, n1;
702
    target_phys_addr_t phys_pc;
703
    TranslationBlock *tb1, *tb2;
704

    
705
    /* remove the TB from the hash list */
706
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
707
    h = tb_phys_hash_func(phys_pc);
708
    tb_remove(&tb_phys_hash[h], tb,
709
              offsetof(TranslationBlock, phys_hash_next));
710

    
711
    /* remove the TB from the page list */
712
    if (tb->page_addr[0] != page_addr) {
713
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
714
        tb_page_remove(&p->first_tb, tb);
715
        invalidate_page_bitmap(p);
716
    }
717
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
718
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
719
        tb_page_remove(&p->first_tb, tb);
720
        invalidate_page_bitmap(p);
721
    }
722

    
723
    tb_invalidated_flag = 1;
724

    
725
    /* remove the TB from the hash list */
726
    h = tb_jmp_cache_hash_func(tb->pc);
727
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
728
        if (env->tb_jmp_cache[h] == tb)
729
            env->tb_jmp_cache[h] = NULL;
730
    }
731

    
732
    /* suppress this TB from the two jump lists */
733
    tb_jmp_remove(tb, 0);
734
    tb_jmp_remove(tb, 1);
735

    
736
    /* suppress any remaining jumps to this TB */
737
    tb1 = tb->jmp_first;
738
    for(;;) {
739
        n1 = (long)tb1 & 3;
740
        if (n1 == 2)
741
            break;
742
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
743
        tb2 = tb1->jmp_next[n1];
744
        tb_reset_jump(tb1, n1);
745
        tb1->jmp_next[n1] = NULL;
746
        tb1 = tb2;
747
    }
748
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
749

    
750
    tb_phys_invalidate_count++;
751
}
752

    
753
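/* set 'len' consecutive bits starting at bit index 'start' in the
   bitmap 'tab' */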
static inline void set_bits(uint8_t *tab, int start, int len)
754
{
755
    int end, mask, end1;
756

    
757
    end = start + len;
758
    tab += start >> 3;
759
    mask = 0xff << (start & 7);
760
    if ((start & ~7) == (end & ~7)) {
761
        if (start < end) {
762
            mask &= ~(0xff << (end & 7));
763
            *tab |= mask;
764
        }
765
    } else {
766
        *tab++ |= mask;
767
        start = (start + 8) & ~7;
768
        end1 = end & ~7;
769
        while (start < end1) {
770
            *tab++ = 0xff;
771
            start += 8;
772
        }
773
        if (start < end) {
774
            mask = ~(0xff << (end & 7));
775
            *tab |= mask;
776
        }
777
    }
778
}
779

    
780
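/* build the bitmap of the bytes of the page that are covered by
   translated code, so that unrelated writes can skip TB invalidation
   (see tb_invalidate_phys_page_fast) */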
static void build_page_bitmap(PageDesc *p)
781
{
782
    int n, tb_start, tb_end;
783
    TranslationBlock *tb;
784

    
785
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
786
    if (!p->code_bitmap)
787
        return;
788

    
789
    tb = p->first_tb;
790
    while (tb != NULL) {
791
        n = (long)tb & 3;
792
        tb = (TranslationBlock *)((long)tb & ~3);
793
        /* NOTE: this is subtle as a TB may span two physical pages */
794
        if (n == 0) {
795
            /* NOTE: tb_end may be after the end of the page, but
796
               it is not a problem */
797
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
798
            tb_end = tb_start + tb->size;
799
            if (tb_end > TARGET_PAGE_SIZE)
800
                tb_end = TARGET_PAGE_SIZE;
801
        } else {
802
            tb_start = 0;
803
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
804
        }
805
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
806
        tb = tb->page_next[n];
807
    }
808
}
809

    
810
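/* translate a block starting at pc/cs_base/flags. If no TB can be
   allocated, the whole translation cache is flushed and the allocation
   retried; the resulting TB is then linked into the physical page
   tables (including a possible second page). */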
TranslationBlock *tb_gen_code(CPUState *env,
811
                              target_ulong pc, target_ulong cs_base,
812
                              int flags, int cflags)
813
{
814
    TranslationBlock *tb;
815
    uint8_t *tc_ptr;
816
    target_ulong phys_pc, phys_page2, virt_page2;
817
    int code_gen_size;
818

    
819
    phys_pc = get_phys_addr_code(env, pc);
820
    tb = tb_alloc(pc);
821
    if (!tb) {
822
        /* flush must be done */
823
        tb_flush(env);
824
        /* cannot fail at this point */
825
        tb = tb_alloc(pc);
826
        /* Don't forget to invalidate previous TB info.  */
827
        tb_invalidated_flag = 1;
828
    }
829
    tc_ptr = code_gen_ptr;
830
    tb->tc_ptr = tc_ptr;
831
    tb->cs_base = cs_base;
832
    tb->flags = flags;
833
    tb->cflags = cflags;
834
    cpu_gen_code(env, tb, &code_gen_size);
835
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
836

    
837
    /* check next page if needed */
838
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
839
    phys_page2 = -1;
840
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
841
        phys_page2 = get_phys_addr_code(env, virt_page2);
842
    }
843
    tb_link_phys(tb, phys_pc, phys_page2);
844
    return tb;
845
}
846

    
847
/* invalidate all TBs which intersect with the target physical page
848
   starting in range [start;end[. NOTE: start and end must refer to
849
   the same physical page. 'is_cpu_write_access' should be true if called
850
   from a real cpu write access: the virtual CPU will exit the current
851
   TB if code is modified inside this TB. */
852
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
853
                                   int is_cpu_write_access)
854
{
855
    int n, current_tb_modified, current_tb_not_found, current_flags;
856
    CPUState *env = cpu_single_env;
857
    PageDesc *p;
858
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
859
    target_ulong tb_start, tb_end;
860
    target_ulong current_pc, current_cs_base;
861

    
862
    p = page_find(start >> TARGET_PAGE_BITS);
863
    if (!p)
864
        return;
865
    if (!p->code_bitmap &&
866
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
867
        is_cpu_write_access) {
868
        /* build code bitmap */
869
        build_page_bitmap(p);
870
    }
871

    
872
    /* we remove all the TBs in the range [start, end[ */
873
    /* XXX: see if in some cases it could be faster to invalidate all the code */
874
    current_tb_not_found = is_cpu_write_access;
875
    current_tb_modified = 0;
876
    current_tb = NULL; /* avoid warning */
877
    current_pc = 0; /* avoid warning */
878
    current_cs_base = 0; /* avoid warning */
879
    current_flags = 0; /* avoid warning */
880
    tb = p->first_tb;
881
    while (tb != NULL) {
882
        n = (long)tb & 3;
883
        tb = (TranslationBlock *)((long)tb & ~3);
884
        tb_next = tb->page_next[n];
885
        /* NOTE: this is subtle as a TB may span two physical pages */
886
        if (n == 0) {
887
            /* NOTE: tb_end may be after the end of the page, but
888
               it is not a problem */
889
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
890
            tb_end = tb_start + tb->size;
891
        } else {
892
            tb_start = tb->page_addr[1];
893
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
894
        }
895
        if (!(tb_end <= start || tb_start >= end)) {
896
#ifdef TARGET_HAS_PRECISE_SMC
897
            if (current_tb_not_found) {
898
                current_tb_not_found = 0;
899
                current_tb = NULL;
900
                if (env->mem_io_pc) {
901
                    /* now we have a real cpu fault */
902
                    current_tb = tb_find_pc(env->mem_io_pc);
903
                }
904
            }
905
            if (current_tb == tb &&
906
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
907
                /* If we are modifying the current TB, we must stop
908
                its execution. We could be more precise by checking
909
                that the modification is after the current PC, but it
910
                would require a specialized function to partially
911
                restore the CPU state */
912

    
913
                current_tb_modified = 1;
914
                cpu_restore_state(current_tb, env,
915
                                  env->mem_io_pc, NULL);
916
#if defined(TARGET_I386)
917
                current_flags = env->hflags;
918
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
919
                current_cs_base = (target_ulong)env->segs[R_CS].base;
920
                current_pc = current_cs_base + env->eip;
921
#else
922
#error unsupported CPU
923
#endif
924
            }
925
#endif /* TARGET_HAS_PRECISE_SMC */
926
            /* we need to do that to handle the case where a signal
927
               occurs while doing tb_phys_invalidate() */
928
            saved_tb = NULL;
929
            if (env) {
930
                saved_tb = env->current_tb;
931
                env->current_tb = NULL;
932
            }
933
            tb_phys_invalidate(tb, -1);
934
            if (env) {
935
                env->current_tb = saved_tb;
936
                if (env->interrupt_request && env->current_tb)
937
                    cpu_interrupt(env, env->interrupt_request);
938
            }
939
        }
940
        tb = tb_next;
941
    }
942
#if !defined(CONFIG_USER_ONLY)
943
    /* if no code remaining, no need to continue to use slow writes */
944
    if (!p->first_tb) {
945
        invalidate_page_bitmap(p);
946
        if (is_cpu_write_access) {
947
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
948
        }
949
    }
950
#endif
951
#ifdef TARGET_HAS_PRECISE_SMC
952
    if (current_tb_modified) {
953
        /* we generate a block containing just the instruction
954
           modifying the memory. It will ensure that it cannot modify
955
           itself */
956
        env->current_tb = NULL;
957
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
958
        cpu_resume_from_signal(env, NULL);
959
    }
960
#endif
961
}
962

    
963
/* len must be <= 8 and start must be a multiple of len */
964
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
965
{
966
    PageDesc *p;
967
    int offset, b;
968
#if 0
969
    if (1) {
970
        if (loglevel) {
971
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
972
                   cpu_single_env->mem_io_vaddr, len,
973
                   cpu_single_env->eip,
974
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
975
        }
976
    }
977
#endif
978
    p = page_find(start >> TARGET_PAGE_BITS);
979
    if (!p)
980
        return;
981
    if (p->code_bitmap) {
982
        offset = start & ~TARGET_PAGE_MASK;
983
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
984
        if (b & ((1 << len) - 1))
985
            goto do_invalidate;
986
    } else {
987
    do_invalidate:
988
        tb_invalidate_phys_page_range(start, start + len, 1);
989
    }
990
}
991

    
992
#if !defined(CONFIG_SOFTMMU)
993
static void tb_invalidate_phys_page(target_phys_addr_t addr,
994
                                    unsigned long pc, void *puc)
995
{
996
    int n, current_flags, current_tb_modified;
997
    target_ulong current_pc, current_cs_base;
998
    PageDesc *p;
999
    TranslationBlock *tb, *current_tb;
1000
#ifdef TARGET_HAS_PRECISE_SMC
1001
    CPUState *env = cpu_single_env;
1002
#endif
1003

    
1004
    addr &= TARGET_PAGE_MASK;
1005
    p = page_find(addr >> TARGET_PAGE_BITS);
1006
    if (!p)
1007
        return;
1008
    tb = p->first_tb;
1009
    current_tb_modified = 0;
1010
    current_tb = NULL;
1011
    current_pc = 0; /* avoid warning */
1012
    current_cs_base = 0; /* avoid warning */
1013
    current_flags = 0; /* avoid warning */
1014
#ifdef TARGET_HAS_PRECISE_SMC
1015
    if (tb && pc != 0) {
1016
        current_tb = tb_find_pc(pc);
1017
    }
1018
#endif
1019
    while (tb != NULL) {
1020
        n = (long)tb & 3;
1021
        tb = (TranslationBlock *)((long)tb & ~3);
1022
#ifdef TARGET_HAS_PRECISE_SMC
1023
        if (current_tb == tb &&
1024
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1025
                /* If we are modifying the current TB, we must stop
1026
                   its execution. We could be more precise by checking
1027
                   that the modification is after the current PC, but it
1028
                   would require a specialized function to partially
1029
                   restore the CPU state */
1030

    
1031
            current_tb_modified = 1;
1032
            cpu_restore_state(current_tb, env, pc, puc);
1033
#if defined(TARGET_I386)
1034
            current_flags = env->hflags;
1035
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
1036
            current_cs_base = (target_ulong)env->segs[R_CS].base;
1037
            current_pc = current_cs_base + env->eip;
1038
#else
1039
#error unsupported CPU
1040
#endif
1041
        }
1042
#endif /* TARGET_HAS_PRECISE_SMC */
1043
        tb_phys_invalidate(tb, addr);
1044
        tb = tb->page_next[n];
1045
    }
1046
    p->first_tb = NULL;
1047
#ifdef TARGET_HAS_PRECISE_SMC
1048
    if (current_tb_modified) {
1049
        /* we generate a block containing just the instruction
1050
           modifying the memory. It will ensure that it cannot modify
1051
           itself */
1052
        env->current_tb = NULL;
1053
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1054
        cpu_resume_from_signal(env, puc);
1055
    }
1056
#endif
1057
}
1058
#endif
1059

    
1060
/* add the tb in the target page and protect it if necessary */
1061
static inline void tb_alloc_page(TranslationBlock *tb,
1062
                                 unsigned int n, target_ulong page_addr)
1063
{
1064
    PageDesc *p;
1065
    TranslationBlock *last_first_tb;
1066

    
1067
    tb->page_addr[n] = page_addr;
1068
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1069
    tb->page_next[n] = p->first_tb;
1070
    last_first_tb = p->first_tb;
1071
    p->first_tb = (TranslationBlock *)((long)tb | n);
1072
    invalidate_page_bitmap(p);
1073

    
1074
#if defined(TARGET_HAS_SMC) || 1
1075

    
1076
#if defined(CONFIG_USER_ONLY)
1077
    if (p->flags & PAGE_WRITE) {
1078
        target_ulong addr;
1079
        PageDesc *p2;
1080
        int prot;
1081

    
1082
        /* force the host page as non writable (writes will have a
1083
           page fault + mprotect overhead) */
1084
        page_addr &= qemu_host_page_mask;
1085
        prot = 0;
1086
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1087
            addr += TARGET_PAGE_SIZE) {
1088

    
1089
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1090
            if (!p2)
1091
                continue;
1092
            prot |= p2->flags;
1093
            p2->flags &= ~PAGE_WRITE;
1094
            page_get_flags(addr);
1095
          }
1096
        mprotect(g2h(page_addr), qemu_host_page_size,
1097
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1098
#ifdef DEBUG_TB_INVALIDATE
1099
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1100
               page_addr);
1101
#endif
1102
    }
1103
#else
1104
    /* if some code is already present, then the pages are already
1105
       protected. So we handle the case where only the first TB is
1106
       allocated in a physical page */
1107
    if (!last_first_tb) {
1108
        tlb_protect_code(page_addr);
1109
    }
1110
#endif
1111

    
1112
#endif /* TARGET_HAS_SMC */
1113
}
1114

    
1115
/* Allocate a new translation block. Flush the translation buffer if
1116
   too many translation blocks or too much generated code. */
1117
TranslationBlock *tb_alloc(target_ulong pc)
1118
{
1119
    TranslationBlock *tb;
1120

    
1121
    if (nb_tbs >= code_gen_max_blocks ||
1122
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1123
        return NULL;
1124
    tb = &tbs[nb_tbs++];
1125
    tb->pc = pc;
1126
    tb->cflags = 0;
1127
    return tb;
1128
}
1129

    
1130
void tb_free(TranslationBlock *tb)
1131
{
1132
    /* In practice this is mostly used for single-use temporary TBs.
1133
       Ignore the hard cases and just back up if this TB happens to
1134
       be the last one generated.  */
1135
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1136
        code_gen_ptr = tb->tc_ptr;
1137
        nb_tbs--;
1138
    }
1139
}
1140

    
1141
/* add a new TB and link it to the physical page tables. phys_page2 is
1142
   (-1) to indicate that only one page contains the TB. */
1143
void tb_link_phys(TranslationBlock *tb,
1144
                  target_ulong phys_pc, target_ulong phys_page2)
1145
{
1146
    unsigned int h;
1147
    TranslationBlock **ptb;
1148

    
1149
    /* Grab the mmap lock to stop another thread invalidating this TB
1150
       before we are done.  */
1151
    mmap_lock();
1152
    /* add in the physical hash table */
1153
    h = tb_phys_hash_func(phys_pc);
1154
    ptb = &tb_phys_hash[h];
1155
    tb->phys_hash_next = *ptb;
1156
    *ptb = tb;
1157

    
1158
    /* add in the page list */
1159
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1160
    if (phys_page2 != -1)
1161
        tb_alloc_page(tb, 1, phys_page2);
1162
    else
1163
        tb->page_addr[1] = -1;
1164

    
1165
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1166
    tb->jmp_next[0] = NULL;
1167
    tb->jmp_next[1] = NULL;
1168

    
1169
    /* init original jump addresses */
1170
    if (tb->tb_next_offset[0] != 0xffff)
1171
        tb_reset_jump(tb, 0);
1172
    if (tb->tb_next_offset[1] != 0xffff)
1173
        tb_reset_jump(tb, 1);
1174

    
1175
#ifdef DEBUG_TB_CHECK
1176
    tb_page_check();
1177
#endif
1178
    mmap_unlock();
1179
}
1180

    
1181
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1182
   tb[1].tc_ptr. Return NULL if not found */
1183
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1184
{
1185
    int m_min, m_max, m;
1186
    unsigned long v;
1187
    TranslationBlock *tb;
1188

    
1189
    if (nb_tbs <= 0)
1190
        return NULL;
1191
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1192
        tc_ptr >= (unsigned long)code_gen_ptr)
1193
        return NULL;
1194
    /* binary search (cf Knuth) */
1195
    m_min = 0;
1196
    m_max = nb_tbs - 1;
1197
    while (m_min <= m_max) {
1198
        m = (m_min + m_max) >> 1;
1199
        tb = &tbs[m];
1200
        v = (unsigned long)tb->tc_ptr;
1201
        if (v == tc_ptr)
1202
            return tb;
1203
        else if (tc_ptr < v) {
1204
            m_max = m - 1;
1205
        } else {
1206
            m_min = m + 1;
1207
        }
1208
    }
1209
    return &tbs[m_max];
1210
}
1211

    
1212
static void tb_reset_jump_recursive(TranslationBlock *tb);
1213

    
1214
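/* if jump 'n' of 'tb' is chained, find the target TB, unlink 'tb' from
   its incoming-jump list, reset the generated jump and recursively
   unchain the target TB as well */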
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1215
{
1216
    TranslationBlock *tb1, *tb_next, **ptb;
1217
    unsigned int n1;
1218

    
1219
    tb1 = tb->jmp_next[n];
1220
    if (tb1 != NULL) {
1221
        /* find head of list */
1222
        for(;;) {
1223
            n1 = (long)tb1 & 3;
1224
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1225
            if (n1 == 2)
1226
                break;
1227
            tb1 = tb1->jmp_next[n1];
1228
        }
1229
        /* we are now sure that tb jumps to tb1 */
1230
        tb_next = tb1;
1231

    
1232
        /* remove tb from the jmp_first list */
1233
        ptb = &tb_next->jmp_first;
1234
        for(;;) {
1235
            tb1 = *ptb;
1236
            n1 = (long)tb1 & 3;
1237
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1238
            if (n1 == n && tb1 == tb)
1239
                break;
1240
            ptb = &tb1->jmp_next[n1];
1241
        }
1242
        *ptb = tb->jmp_next[n];
1243
        tb->jmp_next[n] = NULL;
1244

    
1245
        /* suppress the jump to next tb in generated code */
1246
        tb_reset_jump(tb, n);
1247

    
1248
        /* suppress jumps in the tb on which we could have jumped */
1249
        tb_reset_jump_recursive(tb_next);
1250
    }
1251
}
1252

    
1253
static void tb_reset_jump_recursive(TranslationBlock *tb)
1254
{
1255
    tb_reset_jump_recursive2(tb, 0);
1256
    tb_reset_jump_recursive2(tb, 1);
1257
}
1258

    
1259
#if defined(TARGET_HAS_ICE)
1260
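/* invalidate the TBs covering the RAM byte corresponding to 'pc' so that
   the breakpoint is honoured by freshly translated code */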
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1261
{
1262
    target_phys_addr_t addr;
1263
    target_ulong pd;
1264
    ram_addr_t ram_addr;
1265
    PhysPageDesc *p;
1266

    
1267
    addr = cpu_get_phys_page_debug(env, pc);
1268
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1269
    if (!p) {
1270
        pd = IO_MEM_UNASSIGNED;
1271
    } else {
1272
        pd = p->phys_offset;
1273
    }
1274
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1275
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1276
}
1277
#endif
1278

    
1279
/* Add a watchpoint.  */
1280
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, int type)
1281
{
1282
    int i;
1283

    
1284
    for (i = 0; i < env->nb_watchpoints; i++) {
1285
        if (addr == env->watchpoint[i].vaddr)
1286
            return 0;
1287
    }
1288
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1289
        return -1;
1290

    
1291
    i = env->nb_watchpoints++;
1292
    env->watchpoint[i].vaddr = addr;
1293
    env->watchpoint[i].type = type;
1294
    tlb_flush_page(env, addr);
1295
    /* FIXME: This flush is needed because of the hack to make memory ops
1296
       terminate the TB.  It can be removed once the proper IO trap and
1297
       re-execute bits are in.  */
1298
    tb_flush(env);
1299
    return i;
1300
}
1301

    
1302
/* Remove a watchpoint.  */
1303
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1304
{
1305
    int i;
1306

    
1307
    for (i = 0; i < env->nb_watchpoints; i++) {
1308
        if (addr == env->watchpoint[i].vaddr) {
1309
            env->nb_watchpoints--;
1310
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1311
            tlb_flush_page(env, addr);
1312
            return 0;
1313
        }
1314
    }
1315
    return -1;
1316
}
1317

    
1318
/* Remove all watchpoints. */
1319
void cpu_watchpoint_remove_all(CPUState *env) {
1320
    int i;
1321

    
1322
    for (i = 0; i < env->nb_watchpoints; i++) {
1323
        tlb_flush_page(env, env->watchpoint[i].vaddr);
1324
    }
1325
    env->nb_watchpoints = 0;
1326
}
1327

    
1328
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1329
   breakpoint is reached */
1330
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1331
{
1332
#if defined(TARGET_HAS_ICE)
1333
    int i;
1334

    
1335
    for(i = 0; i < env->nb_breakpoints; i++) {
1336
        if (env->breakpoints[i] == pc)
1337
            return 0;
1338
    }
1339

    
1340
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1341
        return -1;
1342
    env->breakpoints[env->nb_breakpoints++] = pc;
1343

    
1344
    breakpoint_invalidate(env, pc);
1345
    return 0;
1346
#else
1347
    return -1;
1348
#endif
1349
}
1350

    
1351
/* remove all breakpoints */
1352
void cpu_breakpoint_remove_all(CPUState *env) {
1353
#if defined(TARGET_HAS_ICE)
1354
    int i;
1355
    for(i = 0; i < env->nb_breakpoints; i++) {
1356
        breakpoint_invalidate(env, env->breakpoints[i]);
1357
    }
1358
    env->nb_breakpoints = 0;
1359
#endif
1360
}
1361

    
1362
/* remove a breakpoint */
1363
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1364
{
1365
#if defined(TARGET_HAS_ICE)
1366
    int i;
1367
    for(i = 0; i < env->nb_breakpoints; i++) {
1368
        if (env->breakpoints[i] == pc)
1369
            goto found;
1370
    }
1371
    return -1;
1372
 found:
1373
    env->nb_breakpoints--;
1374
    if (i < env->nb_breakpoints)
1375
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1376

    
1377
    breakpoint_invalidate(env, pc);
1378
    return 0;
1379
#else
1380
    return -1;
1381
#endif
1382
}
1383

    
1384
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1385
   CPU loop after each instruction */
1386
void cpu_single_step(CPUState *env, int enabled)
1387
{
1388
#if defined(TARGET_HAS_ICE)
1389
    if (env->singlestep_enabled != enabled) {
1390
        env->singlestep_enabled = enabled;
1391
        /* must flush all the translated code to avoid inconsistencies */
1392
        /* XXX: only flush what is necessary */
1393
        tb_flush(env);
1394
    }
1395
#endif
1396
}
1397

    
1398
/* enable or disable low levels log */
1399
void cpu_set_log(int log_flags)
1400
{
1401
    loglevel = log_flags;
1402
    if (loglevel && !logfile) {
1403
        logfile = fopen(logfilename, log_append ? "a" : "w");
1404
        if (!logfile) {
1405
            perror(logfilename);
1406
            _exit(1);
1407
        }
1408
#if !defined(CONFIG_SOFTMMU)
1409
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1410
        {
1411
            static uint8_t logfile_buf[4096];
1412
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1413
        }
1414
#else
1415
        setvbuf(logfile, NULL, _IOLBF, 0);
1416
#endif
1417
        log_append = 1;
1418
    }
1419
    if (!loglevel && logfile) {
1420
        fclose(logfile);
1421
        logfile = NULL;
1422
    }
1423
}
1424

    
1425
void cpu_set_log_filename(const char *filename)
1426
{
1427
    logfilename = strdup(filename);
1428
    if (logfile) {
1429
        fclose(logfile);
1430
        logfile = NULL;
1431
    }
1432
    cpu_set_log(loglevel);
1433
}
1434

    
1435
/* mask must never be zero, except for A20 change call */
1436
void cpu_interrupt(CPUState *env, int mask)
1437
{
1438
#if !defined(USE_NPTL)
1439
    TranslationBlock *tb;
1440
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1441
#endif
1442
    int old_mask;
1443

    
1444
    old_mask = env->interrupt_request;
1445
    /* FIXME: This is probably not threadsafe.  A different thread could
1446
       be in the middle of a read-modify-write operation.  */
1447
    env->interrupt_request |= mask;
1448
#if defined(USE_NPTL)
1449
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1450
       problem and hope the cpu will stop of its own accord.  For userspace
1451
       emulation this often isn't actually as bad as it sounds.  Often
1452
       signals are used primarily to interrupt blocking syscalls.  */
1453
#else
1454
    if (use_icount) {
1455
        env->icount_decr.u16.high = 0xffff;
1456
#ifndef CONFIG_USER_ONLY
1457
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1458
           an async event happened and we need to process it.  */
1459
        if (!can_do_io(env)
1460
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1461
            cpu_abort(env, "Raised interrupt while not in I/O function");
1462
        }
1463
#endif
1464
    } else {
1465
        tb = env->current_tb;
1466
        /* if the cpu is currently executing code, we must unlink it and
1467
           all the potentially executing TB */
1468
        if (tb && !testandset(&interrupt_lock)) {
1469
            env->current_tb = NULL;
1470
            tb_reset_jump_recursive(tb);
1471
            resetlock(&interrupt_lock);
1472
        }
1473
    }
1474
#endif
1475
}
1476

    
1477
void cpu_reset_interrupt(CPUState *env, int mask)
1478
{
1479
    env->interrupt_request &= ~mask;
1480
}
1481

    
1482
CPULogItem cpu_log_items[] = {
1483
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1484
      "show generated host assembly code for each compiled TB" },
1485
    { CPU_LOG_TB_IN_ASM, "in_asm",
1486
      "show target assembly code for each compiled TB" },
1487
    { CPU_LOG_TB_OP, "op",
1488
      "show micro ops for each compiled TB" },
1489
    { CPU_LOG_TB_OP_OPT, "op_opt",
1490
      "show micro ops "
1491
#ifdef TARGET_I386
1492
      "before eflags optimization and "
1493
#endif
1494
      "after liveness analysis" },
1495
    { CPU_LOG_INT, "int",
1496
      "show interrupts/exceptions in short format" },
1497
    { CPU_LOG_EXEC, "exec",
1498
      "show trace before each executed TB (lots of logs)" },
1499
    { CPU_LOG_TB_CPU, "cpu",
1500
      "show CPU state before block translation" },
1501
#ifdef TARGET_I386
1502
    { CPU_LOG_PCALL, "pcall",
1503
      "show protected mode far calls/returns/exceptions" },
1504
#endif
1505
#ifdef DEBUG_IOPORT
1506
    { CPU_LOG_IOPORT, "ioport",
1507
      "show all i/o ports accesses" },
1508
#endif
1509
    { 0, NULL, NULL },
1510
};
1511

    
1512
static int cmp1(const char *s1, int n, const char *s2)
1513
{
1514
    if (strlen(s2) != n)
1515
        return 0;
1516
    return memcmp(s1, s2, n) == 0;
1517
}
1518

    
1519
/* takes a comma-separated list of log masks. Returns 0 on error. */
1520
int cpu_str_to_log_mask(const char *str)
1521
{
1522
    CPULogItem *item;
1523
    int mask;
1524
    const char *p, *p1;
1525

    
1526
    p = str;
1527
    mask = 0;
1528
    for(;;) {
1529
        p1 = strchr(p, ',');
1530
        if (!p1)
1531
            p1 = p + strlen(p);
1532
        if(cmp1(p,p1-p,"all")) {
1533
                for(item = cpu_log_items; item->mask != 0; item++) {
1534
                        mask |= item->mask;
1535
                }
1536
        } else {
1537
        for(item = cpu_log_items; item->mask != 0; item++) {
1538
            if (cmp1(p, p1 - p, item->name))
1539
                goto found;
1540
        }
1541
        return 0;
1542
        }
1543
    found:
1544
        mask |= item->mask;
1545
        if (*p1 != ',')
1546
            break;
1547
        p = p1 + 1;
1548
    }
1549
    return mask;
1550
}
1551

    
1552
void cpu_abort(CPUState *env, const char *fmt, ...)
1553
{
1554
    va_list ap;
1555
    va_list ap2;
1556

    
1557
    va_start(ap, fmt);
1558
    va_copy(ap2, ap);
1559
    fprintf(stderr, "qemu: fatal: ");
1560
    vfprintf(stderr, fmt, ap);
1561
    fprintf(stderr, "\n");
1562
#ifdef TARGET_I386
1563
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1564
#else
1565
    cpu_dump_state(env, stderr, fprintf, 0);
1566
#endif
1567
    if (logfile) {
1568
        fprintf(logfile, "qemu: fatal: ");
1569
        vfprintf(logfile, fmt, ap2);
1570
        fprintf(logfile, "\n");
1571
#ifdef TARGET_I386
1572
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1573
#else
1574
        cpu_dump_state(env, logfile, fprintf, 0);
1575
#endif
1576
        fflush(logfile);
1577
        fclose(logfile);
1578
    }
1579
    va_end(ap2);
1580
    va_end(ap);
1581
    abort();
1582
}
1583

    
1584
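/* duplicate a CPU state: initialize a new CPU of the same model, then
   copy 'env' over it while preserving the new chaining pointer and
   cpu_index */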
CPUState *cpu_copy(CPUState *env)
1585
{
1586
    CPUState *new_env = cpu_init(env->cpu_model_str);
1587
    /* preserve chaining and index */
1588
    CPUState *next_cpu = new_env->next_cpu;
1589
    int cpu_index = new_env->cpu_index;
1590
    memcpy(new_env, env, sizeof(CPUState));
1591
    new_env->next_cpu = next_cpu;
1592
    new_env->cpu_index = cpu_index;
1593
    return new_env;
1594
}
1595

    
1596
#if !defined(CONFIG_USER_ONLY)
1597

    
1598
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1599
{
1600
    unsigned int i;
1601

    
1602
    /* Discard jump cache entries for any tb which might potentially
1603
       overlap the flushed page.  */
1604
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1605
    memset (&env->tb_jmp_cache[i], 0, 
1606
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1607

    
1608
    i = tb_jmp_cache_hash_page(addr);
1609
    memset (&env->tb_jmp_cache[i], 0, 
1610
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1611
}
1612

    
1613
/* NOTE: if flush_global is true, also flush global entries (not
1614
   implemented yet) */
1615
void tlb_flush(CPUState *env, int flush_global)
1616
{
1617
    int i;
1618

    
1619
#if defined(DEBUG_TLB)
1620
    printf("tlb_flush:\n");
1621
#endif
1622
    /* must reset current TB so that interrupts cannot modify the
1623
       links while we are modifying them */
1624
    env->current_tb = NULL;
1625

    
1626
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1627
        env->tlb_table[0][i].addr_read = -1;
1628
        env->tlb_table[0][i].addr_write = -1;
1629
        env->tlb_table[0][i].addr_code = -1;
1630
        env->tlb_table[1][i].addr_read = -1;
1631
        env->tlb_table[1][i].addr_write = -1;
1632
        env->tlb_table[1][i].addr_code = -1;
1633
#if (NB_MMU_MODES >= 3)
1634
        env->tlb_table[2][i].addr_read = -1;
1635
        env->tlb_table[2][i].addr_write = -1;
1636
        env->tlb_table[2][i].addr_code = -1;
1637
#if (NB_MMU_MODES == 4)
1638
        env->tlb_table[3][i].addr_read = -1;
1639
        env->tlb_table[3][i].addr_write = -1;
1640
        env->tlb_table[3][i].addr_code = -1;
1641
#endif
1642
#endif
1643
    }
1644

    
1645
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1646

    
1647
#ifdef USE_KQEMU
1648
    if (env->kqemu_enabled) {
1649
        kqemu_flush(env, flush_global);
1650
    }
1651
#endif
1652
    tlb_flush_count++;
1653
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        tlb_entry->addr_read = -1;
        tlb_entry->addr_write = -1;
        tlb_entry->addr_code = -1;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_table[0][i], addr);
    tlb_flush_entry(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_flush_entry(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_flush_entry(&env->tlb_table[3][i], addr);
#endif
#endif

    tlb_flush_jmp_cache(env, addr);

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
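
/* Usage sketch (illustrative, not part of the original file): a target's
   tlb_fill()/MMU translation routine calls this after a successful page
   table walk, e.g.

       ret = tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK,
                               paddr & TARGET_PAGE_MASK,
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC,
                               mmu_idx, 1);

   so that subsequent accesses to that page hit the softmmu TLB directly
   instead of re-walking the guest page tables. */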

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0;j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
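
/* Usage sketch (illustrative, not part of the original file): the user-mode
   mmap/mprotect emulation records guest page permissions with this call,
   e.g. after a successful target mmap():

       mmap_lock();
       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
       mmap_unlock();

   page_check_range() below is the matching query used by syscall helpers to
   validate guest buffers before touching them. */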

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if( end < start )
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}
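
/* Usage sketch (illustrative, not part of the original file): the host
   SIGSEGV handler for user-mode emulation typically tries page_unprotect()
   first and only reports a guest fault when it returns 0, roughly:

       if (page_unprotect(guest_addr, pc, puc))
           return 1;   // write hit a page protected for translated code; retry
       // otherwise deliver the fault to the guest

   guest_addr here stands for the faulting host address converted back to a
   guest address; the real handler and conversion live in the per-host signal
   code, not in this file. */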

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
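
/* Usage sketch (illustrative, not part of the original file): board init code
   typically pairs this with qemu_ram_alloc() below for RAM, and with an
   io-memory index returned by cpu_register_io_memory() for MMIO:

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0x00000000, ram_size,
                                    ram_offset | IO_MEM_RAM);

   The base address 0x00000000 and ram_size are placeholders for a concrete
   board model. */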

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 "\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif defined(TARGET_CRIS)
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(phys_ram_base + ram_addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
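
/* Added note (not in the original file): the notdirty handlers above are the
   slow write path reached through the IO_MEM_NOTDIRTY iotlb entry that
   tlb_set_page_exec() installs for RAM pages whose dirty bits are not fully
   set.  Each write first lets tb_invalidate_phys_page_fast() discard any
   translated code on the page, then performs the store and marks the page
   dirty.  Only when every flag including CODE_DIRTY_FLAG is set (0xff, i.e.
   no translated code remains) does tlb_set_dirty() switch the TLB entry back
   to the fast RAM path so later writes skip this hook. */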

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong vaddr;
    int i;

    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    for (i = 0; i < env->nb_watchpoints; i++) {
        if (vaddr == env->watchpoint[i].vaddr
                && (env->watchpoint[i].type & flags)) {
            env->watchpoint_hit = i + 1;
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
            break;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, PAGE_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}
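
/* Added note (not in the original file): subpages are how regions smaller
   than TARGET_PAGE_SIZE are handled.  When cpu_register_physical_memory()
   sees a registration that does not cover a whole page (or an io region
   flagged IO_MEM_SUBWIDTH), it wraps that page in a subpage_t via
   subpage_init(), marks the page IO_MEM_SUBPAGE, and then maps byte ranges
   of the page onto the underlying io-memory callbacks with
   subpage_register().  The subpage_read/subpage_write tables above dispatch
   each access to the handler registered for its offset within the page. */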

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
                                          watch_mem_write, NULL);
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
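
/* Usage sketch (illustrative, not part of the original file): a device model
   registers its MMIO callbacks and then maps them at a physical address,
   e.g.

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,   // hypothetical handlers
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);

   mydev_*, s and base are placeholders; the returned value encodes the io
   table index (plus IO_MEM_SUBWIDTH when an access size is missing). */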

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
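
/* Usage sketch (illustrative, not part of the original file): device models
   use this as their "DMA" path into guest memory, e.g. to copy a descriptor
   out of guest RAM and a completion status back in:

       uint8_t desc[16];
       uint8_t status = 0;
       cpu_physical_memory_read(desc_paddr, desc, sizeof(desc));
       // ... process the descriptor ...
       cpu_physical_memory_write(status_paddr, &status, 1);

   cpu_physical_memory_read/write are the thin wrappers around this function
   (used by ldub_phys/stb_phys below); desc_paddr and status_paddr are
   placeholders. */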
2791

    
2792
/* used for ROM loading : can write in RAM and ROM */
2793
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2794
                                   const uint8_t *buf, int len)
2795
{
2796
    int l;
2797
    uint8_t *ptr;
2798
    target_phys_addr_t page;
2799
    unsigned long pd;
2800
    PhysPageDesc *p;
2801

    
2802
    while (len > 0) {
2803
        page = addr & TARGET_PAGE_MASK;
2804
        l = (page + TARGET_PAGE_SIZE) - addr;
2805
        if (l > len)
2806
            l = len;
2807
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2808
        if (!p) {
2809
            pd = IO_MEM_UNASSIGNED;
2810
        } else {
2811
            pd = p->phys_offset;
2812
        }
2813

    
2814
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2815
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2816
            !(pd & IO_MEM_ROMD)) {
2817
            /* do nothing */
2818
        } else {
2819
            unsigned long addr1;
2820
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2821
            /* ROM/RAM case */
2822
            ptr = phys_ram_base + addr1;
2823
            memcpy(ptr, buf, l);
2824
        }
2825
        len -= l;
2826
        buf += l;
2827
        addr += l;
2828
    }
2829
}
2830

    
2831

    
2832
/* warning: addr must be aligned */
2833
uint32_t ldl_phys(target_phys_addr_t addr)
2834
{
2835
    int io_index;
2836
    uint8_t *ptr;
2837
    uint32_t val;
2838
    unsigned long pd;
2839
    PhysPageDesc *p;
2840

    
2841
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2842
    if (!p) {
2843
        pd = IO_MEM_UNASSIGNED;
2844
    } else {
2845
        pd = p->phys_offset;
2846
    }
2847

    
2848
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2849
        !(pd & IO_MEM_ROMD)) {
2850
        /* I/O case */
2851
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2852
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2853
    } else {
2854
        /* RAM case */
2855
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2856
            (addr & ~TARGET_PAGE_MASK);
2857
        val = ldl_p(ptr);
2858
    }
2859
    return val;
2860
}
2861

    
2862
/* warning: addr must be aligned */
2863
uint64_t ldq_phys(target_phys_addr_t addr)
2864
{
2865
    int io_index;
2866
    uint8_t *ptr;
2867
    uint64_t val;
2868
    unsigned long pd;
2869
    PhysPageDesc *p;
2870

    
2871
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2872
    if (!p) {
2873
        pd = IO_MEM_UNASSIGNED;
2874
    } else {
2875
        pd = p->phys_offset;
2876
    }
2877

    
2878
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2879
        !(pd & IO_MEM_ROMD)) {
2880
        /* I/O case */
2881
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2882
#ifdef TARGET_WORDS_BIGENDIAN
2883
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
2884
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
2885
#else
2886
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2887
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
2888
#endif
2889
    } else {
2890
        /* RAM case */
2891
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2892
            (addr & ~TARGET_PAGE_MASK);
2893
        val = ldq_p(ptr);
2894
    }
2895
    return val;
2896
}
2897

    
2898
/* XXX: optimize */
2899
uint32_t ldub_phys(target_phys_addr_t addr)
2900
{
2901
    uint8_t val;
2902
    cpu_physical_memory_read(addr, &val, 1);
2903
    return val;
2904
}
2905

    
2906
/* XXX: optimize */
2907
uint32_t lduw_phys(target_phys_addr_t addr)
2908
{
2909
    uint16_t val;
2910
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
2911
    return tswap16(val);
2912
}
2913

    
2914
/* warning: addr must be aligned. The ram page is not masked as dirty
2915
   and the code inside is not invalidated. It is useful if the dirty
2916
   bits are used to track modified PTEs */
2917
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
2918
{
2919
    int io_index;
2920
    uint8_t *ptr;
2921
    unsigned long pd;
2922
    PhysPageDesc *p;
2923

    
2924
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2925
    if (!p) {
2926
        pd = IO_MEM_UNASSIGNED;
2927
    } else {
2928
        pd = p->phys_offset;
2929
    }
2930

    
2931
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2932
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2933
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2934
    } else {
2935
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2936
            (addr & ~TARGET_PAGE_MASK);
2937
        stl_p(ptr, val);
2938
    }
2939
}
2940

    
2941
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
2942
{
2943
    int io_index;
2944
    uint8_t *ptr;
2945
    unsigned long pd;
2946
    PhysPageDesc *p;
2947

    
2948
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2949
    if (!p) {
2950
        pd = IO_MEM_UNASSIGNED;
2951
    } else {
2952
        pd = p->phys_offset;
2953
    }
2954

    
2955
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2956
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2957
#ifdef TARGET_WORDS_BIGENDIAN
2958
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
2959
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
2960
#else
2961
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2962
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
2963
#endif
2964
    } else {
2965
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2966
            (addr & ~TARGET_PAGE_MASK);
2967
        stq_p(ptr, val);
2968
    }
2969
}
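
/* Usage sketch (illustrative, not part of the original file): target MMU
   code that rewrites guest page table entries during a walk (for instance
   to set accessed/dirty bits) uses the *_notdirty variants so that the
   update does not disturb dirty tracking of the PTE page itself:

       pte |= PTE_ACCESSED;                // hypothetical target-specific flag
       stl_phys_notdirty(pte_addr, pte);

   The plain stl_phys() below would instead mark the page dirty and
   invalidate any translated code on it. */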

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
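
/* Usage sketch (illustrative, not part of the original file): debugger-style
   accesses (e.g. the gdb stub or monitor) are expected to go through this
   routine, which translates one guest-virtual page at a time with
   cpu_get_phys_page_debug() and then falls back to cpu_physical_memory_rw():

       uint32_t insn;
       if (cpu_memory_rw_debug(env, guest_pc, (uint8_t *)&insn, 4, 0) == 0) {
           // insn now holds 4 guest bytes; guest_pc is a placeholder
       }
*/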

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif