/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START        0x00000000
#define MMAP_AREA_END          0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
ram_addr_t phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;
static int in_migration;
static ram_addr_t phys_ram_alloc_offset = 0;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

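/* initialize the host page size globals and the physical page table;
   in user mode, also mark the host's pre-existing mappings as reserved
   so the guest cannot map over them */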
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

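/* return the slot in the first-level page table that covers the target
   page number 'index', or NULL if the address is out of range */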
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        unsigned long addr;
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        addr = h2g(p);
        if (addr == (target_ulong)addr) {
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

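/* look up the PhysPageDesc for a physical page number, allocating
   intermediate tables on demand when 'alloc' is non-zero */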
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;
    PhysPageDesc *pd;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    pd = *lp;
    if (!pd) {
        int i;
        /* allocate if not found */
        if (!alloc)
            return NULL;
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        *lp = pd;
        for (i = 0; i < L2_SIZE; i++)
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
    }
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used. */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
#endif

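/* allocate the buffer that receives the translated code, make it
   executable and, on hosts with short branch ranges, place it where
   direct calls and jumps to the prologue remain possible */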
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    if (!code_gen_buffer) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

#define CPU_COMMON_SAVE_VERSION 1

static void cpu_common_save(QEMUFile *f, void *opaque)
{
    CPUState *env = opaque;

    qemu_put_be32s(f, &env->halted);
    qemu_put_be32s(f, &env->interrupt_request);
}

static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
{
    CPUState *env = opaque;

    if (version_id != CPU_COMMON_SAVE_VERSION)
        return -EINVAL;

    qemu_get_be32s(f, &env->halted);
    qemu_get_be32s(f, &env->interrupt_request);
    tlb_flush(env, 1);

    return 0;
}
#endif

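/* register a new virtual CPU: append it to the global CPU list, assign
   it the next cpu_index and, for system emulation, register its savevm
   handlers */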
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
                    cpu_common_save, cpu_common_load, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

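/* free the SMC code bitmap of a page and reset its write counter */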
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

static void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

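/* remove 'tb' from the per-page TB list; the list pointers carry the
   page index of the next TB in their two low bits */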
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

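/* unlink jump slot 'n' of 'tb' from the circular list of TBs that
   jump to the same target */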
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

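/* set bits [start, start + len) in the bitmap 'tab' */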
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

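/* build the bitmap of which bytes of the page are covered by a TB, so
   that later writes only force an invalidation when they hit code */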
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                   cpu_single_env->mem_io_vaddr, len,
                   cpu_single_env->eip,
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    wp = qemu_malloc(sizeof(*wp));
    if (!wp)
        return -ENOBUFS;

    wp->vaddr = addr;
    wp->len_mask = 0;
    wp->flags = flags;

    wp->next = env->watchpoints;
    wp->prev = NULL;
    if (wp->next)
        wp->next->prev = wp;
    env->watchpoints = wp;

    tlb_flush_page(env, addr);
    /* FIXME: This flush is needed because of the hack to make memory ops
       terminate the TB.  It can be removed once the proper IO trap and
       re-execute bits are in.  */
    tb_flush(env);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    CPUWatchpoint *wp;

    for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
        if (addr == wp->vaddr && flags == wp->flags) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    if (watchpoint->next)
        watchpoint->next->prev = watchpoint->prev;
    if (watchpoint->prev)
        watchpoint->prev->next = watchpoint->next;
    else
        env->watchpoints = watchpoint->next;

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp;

    for (wp = env->watchpoints; wp != NULL; wp = wp->next)
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));
    if (!bp)
        return -ENOBUFS;

    bp->pc = pc;
    bp->flags = flags;

    bp->next = env->breakpoints;
    bp->prev = NULL;
    if (bp->next)
        bp->next->prev = bp;
    env->breakpoints = bp;

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    for (bp = env->breakpoints; bp != NULL; bp = bp->next) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    if (breakpoint->next)
        breakpoint->next->prev = breakpoint->prev;
    if (breakpoint->prev)
        breakpoint->prev->next = breakpoint->next;
    else
        env->breakpoints = breakpoint->next;

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    for (bp = env->breakpoints; bp != NULL; bp = bp->next)
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
#if !defined(USE_NPTL)
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
#endif
    int old_mask;

    old_mask = env->interrupt_request;
    /* FIXME: This is probably not threadsafe.  A different thread could
       be in the middle of a read-modify-write operation.  */
    env->interrupt_request |= mask;
#if defined(USE_NPTL)
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
#else
    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
           an async event happened and we need to process it.  */
        if (!can_do_io(env)
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        tb = env->current_tb;
        /* if the cpu is currently executing code, we must unlink it and
           all the potentially executing TB */
        if (tb && !testandset(&interrupt_lock)) {
            env->current_tb = NULL;
            tb_reset_jump_recursive(tb);
            resetlock(&interrupt_lock);
        }
    }
#endif
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (logfile) {
        fprintf(logfile, "qemu: fatal: ");
        vfprintf(logfile, fmt, ap2);
        fprintf(logfile, "\n");
#ifdef TARGET_I386
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        cpu_dump_state(env, logfile, fprintf, 0);
#endif
        fflush(logfile);
        fclose(logfile);
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    /* preserve chaining and index */
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
    memcpy(new_env, env, sizeof(CPUState));
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;
    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_table[0][i].addr_read = -1;
        env->tlb_table[0][i].addr_write = -1;
        env->tlb_table[0][i].addr_code = -1;
        env->tlb_table[1][i].addr_read = -1;
        env->tlb_table[1][i].addr_write = -1;
        env->tlb_table[1][i].addr_code = -1;
#if (NB_MMU_MODES >= 3)
        env->tlb_table[2][i].addr_read = -1;
        env->tlb_table[2][i].addr_write = -1;
        env->tlb_table[2][i].addr_code = -1;
#if (NB_MMU_MODES == 4)
        env->tlb_table[3][i].addr_read = -1;
        env->tlb_table[3][i].addr_write = -1;
        env->tlb_table[3][i].addr_code = -1;
#endif
#endif
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1720
{
1721
    if (addr == (tlb_entry->addr_read &
1722
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1723
        addr == (tlb_entry->addr_write &
1724
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1725
        addr == (tlb_entry->addr_code &
1726
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1727
        tlb_entry->addr_read = -1;
1728
        tlb_entry->addr_write = -1;
1729
        tlb_entry->addr_code = -1;
1730
    }
1731
}
1732

    
1733
void tlb_flush_page(CPUState *env, target_ulong addr)
1734
{
1735
    int i;
1736

    
1737
#if defined(DEBUG_TLB)
1738
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1739
#endif
1740
    /* must reset current TB so that interrupts cannot modify the
1741
       links while we are modifying them */
1742
    env->current_tb = NULL;
1743

    
1744
    addr &= TARGET_PAGE_MASK;
1745
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1746
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1747
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1748
#if (NB_MMU_MODES >= 3)
1749
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1750
#if (NB_MMU_MODES == 4)
1751
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1752
#endif
1753
#endif
1754

    
1755
    tlb_flush_jmp_cache(env, addr);
1756

    
1757
#ifdef USE_KQEMU
1758
    if (env->kqemu_enabled) {
1759
        kqemu_flush_page(env, addr);
1760
    }
1761
#endif
1762
}
1763

    
1764
/* update the TLBs so that writes to code in the virtual page 'addr'
1765
   can be detected */
1766
static void tlb_protect_code(ram_addr_t ram_addr)
1767
{
1768
    cpu_physical_memory_reset_dirty(ram_addr,
1769
                                    ram_addr + TARGET_PAGE_SIZE,
1770
                                    CODE_DIRTY_FLAG);
1771
}
1772

    
1773
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
1775
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1776
                                    target_ulong vaddr)
1777
{
1778
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1779
}
1780

    
1781
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1782
                                         unsigned long start, unsigned long length)
1783
{
1784
    unsigned long addr;
1785
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1786
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1787
        if ((addr - start) < length) {
1788
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1789
        }
1790
    }
1791
}
1792

    
1793
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1794
                                     int dirty_flags)
1795
{
1796
    CPUState *env;
1797
    unsigned long length, start1;
1798
    int i, mask, len;
1799
    uint8_t *p;
1800

    
1801
    start &= TARGET_PAGE_MASK;
1802
    end = TARGET_PAGE_ALIGN(end);
1803

    
1804
    length = end - start;
1805
    if (length == 0)
1806
        return;
1807
    len = length >> TARGET_PAGE_BITS;
1808
#ifdef USE_KQEMU
1809
    /* XXX: should not depend on cpu context */
1810
    env = first_cpu;
1811
    if (env->kqemu_enabled) {
1812
        ram_addr_t addr;
1813
        addr = start;
1814
        for(i = 0; i < len; i++) {
1815
            kqemu_set_notdirty(env, addr);
1816
            addr += TARGET_PAGE_SIZE;
1817
        }
1818
    }
1819
#endif
1820
    mask = ~dirty_flags;
1821
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1822
    for(i = 0; i < len; i++)
1823
        p[i] &= mask;
1824

    
1825
    /* we modify the TLB cache so that the dirty bit will be set again
1826
       when accessing the range */
1827
    start1 = start + (unsigned long)phys_ram_base;
1828
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1829
        for(i = 0; i < CPU_TLB_SIZE; i++)
1830
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1831
        for(i = 0; i < CPU_TLB_SIZE; i++)
1832
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1833
#if (NB_MMU_MODES >= 3)
1834
        for(i = 0; i < CPU_TLB_SIZE; i++)
1835
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1836
#if (NB_MMU_MODES == 4)
1837
        for(i = 0; i < CPU_TLB_SIZE; i++)
1838
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1839
#endif
1840
#endif
1841
    }
1842
}
1843

    
1844
int cpu_physical_memory_set_dirty_tracking(int enable)
1845
{
1846
    in_migration = enable;
1847
    return 0;
1848
}
1849

    
1850
int cpu_physical_memory_get_dirty_tracking(void)
1851
{
1852
    return in_migration;
1853
}
1854

    
1855
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1856
{
1857
    ram_addr_t ram_addr;
1858

    
1859
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1860
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1861
            tlb_entry->addend - (unsigned long)phys_ram_base;
1862
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1863
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1864
        }
1865
    }
1866
}
1867

    
1868
/* update the TLB according to the current state of the dirty bits */
1869
void cpu_tlb_update_dirty(CPUState *env)
1870
{
1871
    int i;
1872
    for(i = 0; i < CPU_TLB_SIZE; i++)
1873
        tlb_update_dirty(&env->tlb_table[0][i]);
1874
    for(i = 0; i < CPU_TLB_SIZE; i++)
1875
        tlb_update_dirty(&env->tlb_table[1][i]);
1876
#if (NB_MMU_MODES >= 3)
1877
    for(i = 0; i < CPU_TLB_SIZE; i++)
1878
        tlb_update_dirty(&env->tlb_table[2][i]);
1879
#if (NB_MMU_MODES == 4)
1880
    for(i = 0; i < CPU_TLB_SIZE; i++)
1881
        tlb_update_dirty(&env->tlb_table[3][i]);
1882
#endif
1883
#endif
1884
}
1885

    
1886
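/* if this TLB entry maps 'vaddr' with the not-dirty trap set, clear
   TLB_NOTDIRTY so that writes take the fast path again */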
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1887
{
1888
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1889
        tlb_entry->addr_write = vaddr;
1890
}
1891

    
1892
/* update the TLB corresponding to virtual page vaddr
1893
   so that it is no longer dirty */
1894
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1895
{
1896
    int i;
1897

    
1898
    vaddr &= TARGET_PAGE_MASK;
1899
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1900
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1901
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1902
#if (NB_MMU_MODES >= 3)
1903
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1904
#if (NB_MMU_MODES == 4)
1905
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1906
#endif
1907
#endif
1908
}
1909

    
1910
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Returns 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
1914
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1915
                      target_phys_addr_t paddr, int prot,
1916
                      int mmu_idx, int is_softmmu)
1917
{
1918
    PhysPageDesc *p;
1919
    unsigned long pd;
1920
    unsigned int index;
1921
    target_ulong address;
1922
    target_ulong code_address;
1923
    target_phys_addr_t addend;
1924
    int ret;
1925
    CPUTLBEntry *te;
1926
    CPUWatchpoint *wp;
1927
    target_phys_addr_t iotlb;
1928

    
1929
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1930
    if (!p) {
1931
        pd = IO_MEM_UNASSIGNED;
1932
    } else {
1933
        pd = p->phys_offset;
1934
    }
1935
#if defined(DEBUG_TLB)
1936
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1937
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1938
#endif
1939

    
1940
    ret = 0;
1941
    address = vaddr;
1942
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1943
        /* IO memory case (romd handled later) */
1944
        address |= TLB_MMIO;
1945
    }
1946
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1947
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1948
        /* Normal RAM.  */
1949
        iotlb = pd & TARGET_PAGE_MASK;
1950
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1951
            iotlb |= IO_MEM_NOTDIRTY;
1952
        else
1953
            iotlb |= IO_MEM_ROM;
1954
    } else {
1955
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
1961
        iotlb = (pd & ~TARGET_PAGE_MASK) + paddr;
1962
    }
1963

    
1964
    code_address = address;
1965
    /* Make accesses to pages with watchpoints go via the
1966
       watchpoint trap routines.  */
1967
    for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
1968
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1969
            iotlb = io_mem_watch + paddr;
1970
            /* TODO: The memory case can be optimized by not trapping
1971
               reads of pages with a write breakpoint.  */
1972
            address |= TLB_MMIO;
1973
        }
1974
    }
1975

    
1976
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1977
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
1978
    te = &env->tlb_table[mmu_idx][index];
1979
    te->addend = addend - vaddr;
1980
    if (prot & PAGE_READ) {
1981
        te->addr_read = address;
1982
    } else {
1983
        te->addr_read = -1;
1984
    }
1985

    
1986
    if (prot & PAGE_EXEC) {
1987
        te->addr_code = code_address;
1988
    } else {
1989
        te->addr_code = -1;
1990
    }
1991
    if (prot & PAGE_WRITE) {
1992
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
1993
            (pd & IO_MEM_ROMD)) {
1994
            /* Write access calls the I/O callback.  */
1995
            te->addr_write = address | TLB_MMIO;
1996
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
1997
                   !cpu_physical_memory_is_dirty(pd)) {
1998
            te->addr_write = address | TLB_NOTDIRTY;
1999
        } else {
2000
            te->addr_write = address;
2001
        }
2002
    } else {
2003
        te->addr_write = -1;
2004
    }
2005
    return ret;
2006
}
2007

    
2008
#else
2009

    
2010
void tlb_flush(CPUState *env, int flush_global)
2011
{
2012
}
2013

    
2014
void tlb_flush_page(CPUState *env, target_ulong addr)
2015
{
2016
}
2017

    
2018
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2019
                      target_phys_addr_t paddr, int prot,
2020
                      int mmu_idx, int is_softmmu)
2021
{
2022
    return 0;
2023
}
2024

    
2025
/* dump memory mappings */
2026
void page_dump(FILE *f)
2027
{
2028
    unsigned long start, end;
2029
    int i, j, prot, prot1;
2030
    PageDesc *p;
2031

    
2032
    fprintf(f, "%-8s %-8s %-8s %s\n",
2033
            "start", "end", "size", "prot");
2034
    start = -1;
2035
    end = -1;
2036
    prot = 0;
2037
    for(i = 0; i <= L1_SIZE; i++) {
2038
        if (i < L1_SIZE)
2039
            p = l1_map[i];
2040
        else
2041
            p = NULL;
2042
        for(j = 0;j < L2_SIZE; j++) {
2043
            if (!p)
2044
                prot1 = 0;
2045
            else
2046
                prot1 = p[j].flags;
2047
            if (prot1 != prot) {
2048
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2049
                if (start != -1) {
2050
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2051
                            start, end, end - start,
2052
                            prot & PAGE_READ ? 'r' : '-',
2053
                            prot & PAGE_WRITE ? 'w' : '-',
2054
                            prot & PAGE_EXEC ? 'x' : '-');
2055
                }
2056
                if (prot1 != 0)
2057
                    start = end;
2058
                else
2059
                    start = -1;
2060
                prot = prot1;
2061
            }
2062
            if (!p)
2063
                break;
2064
        }
2065
    }
2066
}
2067

    
2068
int page_get_flags(target_ulong address)
2069
{
2070
    PageDesc *p;
2071

    
2072
    p = page_find(address >> TARGET_PAGE_BITS);
2073
    if (!p)
2074
        return 0;
2075
    return p->flags;
2076
}
2077

    
2078
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
2081
void page_set_flags(target_ulong start, target_ulong end, int flags)
2082
{
2083
    PageDesc *p;
2084
    target_ulong addr;
2085

    
2086
    /* mmap_lock should already be held.  */
2087
    start = start & TARGET_PAGE_MASK;
2088
    end = TARGET_PAGE_ALIGN(end);
2089
    if (flags & PAGE_WRITE)
2090
        flags |= PAGE_WRITE_ORG;
2091
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2092
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2093
        /* We may be called for host regions that are outside guest
2094
           address space.  */
2095
        if (!p)
2096
            return;
2097
        /* if the write protection is set, then we invalidate the code
2098
           inside */
2099
        if (!(p->flags & PAGE_WRITE) &&
2100
            (flags & PAGE_WRITE) &&
2101
            p->first_tb) {
2102
            tb_invalidate_phys_page(addr, 0, NULL);
2103
        }
2104
        p->flags = flags;
2105
    }
2106
}
2107

    
2108
int page_check_range(target_ulong start, target_ulong len, int flags)
2109
{
2110
    PageDesc *p;
2111
    target_ulong end;
2112
    target_ulong addr;
2113

    
2114
    if (start + len < start)
2115
        /* we've wrapped around */
2116
        return -1;
2117

    
2118
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2119
    start = start & TARGET_PAGE_MASK;
2120

    
2121
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2122
        p = page_find(addr >> TARGET_PAGE_BITS);
2123
        if( !p )
2124
            return -1;
2125
        if( !(p->flags & PAGE_VALID) )
2126
            return -1;
2127

    
2128
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2129
            return -1;
2130
        if (flags & PAGE_WRITE) {
2131
            if (!(p->flags & PAGE_WRITE_ORG))
2132
                return -1;
2133
            /* unprotect the page if it was put read-only because it
2134
               contains translated code */
2135
            if (!(p->flags & PAGE_WRITE)) {
2136
                if (!page_unprotect(addr, 0, NULL))
2137
                    return -1;
2138
            }
2139
            return 0;
2140
        }
2141
    }
2142
    return 0;
2143
}
2144

    
2145
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2147
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2148
{
2149
    unsigned int page_index, prot, pindex;
2150
    PageDesc *p, *p1;
2151
    target_ulong host_start, host_end, addr;
2152

    
2153
    /* Technically this isn't safe inside a signal handler.  However we
2154
       know this only ever happens in a synchronous SEGV handler, so in
2155
       practice it seems to be ok.  */
2156
    mmap_lock();
2157

    
2158
    host_start = address & qemu_host_page_mask;
2159
    page_index = host_start >> TARGET_PAGE_BITS;
2160
    p1 = page_find(page_index);
2161
    if (!p1) {
2162
        mmap_unlock();
2163
        return 0;
2164
    }
2165
    host_end = host_start + qemu_host_page_size;
2166
    p = p1;
2167
    prot = 0;
2168
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2169
        prot |= p->flags;
2170
        p++;
2171
    }
2172
    /* if the page was really writable, then we change its
2173
       protection back to writable */
2174
    if (prot & PAGE_WRITE_ORG) {
2175
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2176
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2177
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2178
                     (prot & PAGE_BITS) | PAGE_WRITE);
2179
            p1[pindex].flags |= PAGE_WRITE;
2180
            /* and since the content will be modified, we must invalidate
2181
               the corresponding translated code. */
2182
            tb_invalidate_phys_page(address, pc, puc);
2183
#ifdef DEBUG_TB_CHECK
2184
            tb_invalidate_check(address);
2185
#endif
2186
            mmap_unlock();
2187
            return 1;
2188
        }
2189
    }
2190
    mmap_unlock();
2191
    return 0;
2192
}
2193

    
2194
static inline void tlb_set_dirty(CPUState *env,
2195
                                 unsigned long addr, target_ulong vaddr)
2196
{
2197
}
2198
#endif /* defined(CONFIG_USER_ONLY) */
2199

    
2200
#if !defined(CONFIG_USER_ONLY)
2201
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2202
                             ram_addr_t memory);
2203
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2204
                           ram_addr_t orig_memory);
2205
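/* compute the sub-page range [start_addr2, end_addr2] that the registered
   region covers inside the page at 'addr'; need_subpage is set when the
   region does not span the whole page */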
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2206
                      need_subpage)                                     \
2207
    do {                                                                \
2208
        if (addr > start_addr)                                          \
2209
            start_addr2 = 0;                                            \
2210
        else {                                                          \
2211
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2212
            if (start_addr2 > 0)                                        \
2213
                need_subpage = 1;                                       \
2214
        }                                                               \
2215
                                                                        \
2216
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2217
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2218
        else {                                                          \
2219
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2220
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2221
                need_subpage = 1;                                       \
2222
        }                                                               \
2223
    } while (0)
2224

    
2225
/* register physical memory. 'size' must be a multiple of the target
2226
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2227
   io memory page */
2228
void cpu_register_physical_memory(target_phys_addr_t start_addr,
2229
                                  ram_addr_t size,
2230
                                  ram_addr_t phys_offset)
2231
{
2232
    target_phys_addr_t addr, end_addr;
2233
    PhysPageDesc *p;
2234
    CPUState *env;
2235
    ram_addr_t orig_size = size;
2236
    void *subpage;
2237

    
2238
#ifdef USE_KQEMU
2239
    /* XXX: should not depend on cpu context */
2240
    env = first_cpu;
2241
    if (env->kqemu_enabled) {
2242
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2243
    }
2244
#endif
2245
    if (kvm_enabled())
2246
        kvm_set_phys_mem(start_addr, size, phys_offset);
2247

    
2248
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2249
    end_addr = start_addr + (target_phys_addr_t)size;
2250
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2251
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2252
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2253
            ram_addr_t orig_memory = p->phys_offset;
2254
            target_phys_addr_t start_addr2, end_addr2;
2255
            int need_subpage = 0;
2256

    
2257
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2258
                          need_subpage);
2259
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2260
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2261
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2262
                                           &p->phys_offset, orig_memory);
2263
                } else {
2264
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2265
                                            >> IO_MEM_SHIFT];
2266
                }
2267
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
2268
            } else {
2269
                p->phys_offset = phys_offset;
2270
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2271
                    (phys_offset & IO_MEM_ROMD))
2272
                    phys_offset += TARGET_PAGE_SIZE;
2273
            }
2274
        } else {
2275
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2276
            p->phys_offset = phys_offset;
2277
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2278
                (phys_offset & IO_MEM_ROMD))
2279
                phys_offset += TARGET_PAGE_SIZE;
2280
            else {
2281
                target_phys_addr_t start_addr2, end_addr2;
2282
                int need_subpage = 0;
2283

    
2284
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2285
                              end_addr2, need_subpage);
2286

    
2287
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2288
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2289
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
2290
                    subpage_register(subpage, start_addr2, end_addr2,
2291
                                     phys_offset);
2292
                }
2293
            }
2294
        }
2295
    }
2296

    
2297
    /* since each CPU stores ram addresses in its TLB cache, we must
2298
       reset the modified entries */
2299
    /* XXX: slow ! */
2300
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2301
        tlb_flush(env, 1);
2302
    }
2303
}
2304

    
2305
/* XXX: temporary until new memory mapping API */
2306
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2307
{
2308
    PhysPageDesc *p;
2309

    
2310
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2311
    if (!p)
2312
        return IO_MEM_UNASSIGNED;
2313
    return p->phys_offset;
2314
}
2315

    
2316
/* XXX: better than nothing */
2317
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2318
{
2319
    ram_addr_t addr;
2320
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2321
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2322
                (uint64_t)size, (uint64_t)phys_ram_size);
2323
        abort();
2324
    }
2325
    addr = phys_ram_alloc_offset;
2326
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2327
    return addr;
2328
}
2329

    
2330
void qemu_ram_free(ram_addr_t addr)
2331
{
2332
}
2333

    
2334
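/* accesses to unassigned memory: reads return 0, and on SPARC and CRIS
   targets an unassigned access fault is raised as well */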
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2335
{
2336
#ifdef DEBUG_UNASSIGNED
2337
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2338
#endif
2339
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2340
    do_unassigned_access(addr, 0, 0, 0, 1);
2341
#endif
2342
    return 0;
2343
}
2344

    
2345
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2346
{
2347
#ifdef DEBUG_UNASSIGNED
2348
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2349
#endif
2350
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2351
    do_unassigned_access(addr, 0, 0, 0, 2);
2352
#endif
2353
    return 0;
2354
}
2355

    
2356
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2357
{
2358
#ifdef DEBUG_UNASSIGNED
2359
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2360
#endif
2361
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2362
    do_unassigned_access(addr, 0, 0, 0, 4);
2363
#endif
2364
    return 0;
2365
}
2366

    
2367
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2368
{
2369
#ifdef DEBUG_UNASSIGNED
2370
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2371
#endif
2372
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2373
    do_unassigned_access(addr, 1, 0, 0, 1);
2374
#endif
2375
}
2376

    
2377
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2378
{
2379
#ifdef DEBUG_UNASSIGNED
2380
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2381
#endif
2382
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2383
    do_unassigned_access(addr, 1, 0, 0, 2);
2384
#endif
2385
}
2386

    
2387
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2388
{
2389
#ifdef DEBUG_UNASSIGNED
2390
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2391
#endif
2392
#if defined(TARGET_SPARC) || defined(TARGET_CRIS)
2393
    do_unassigned_access(addr, 1, 0, 0, 4);
2394
#endif
2395
}
2396

    
2397
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2398
    unassigned_mem_readb,
2399
    unassigned_mem_readw,
2400
    unassigned_mem_readl,
2401
};
2402

    
2403
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2404
    unassigned_mem_writeb,
2405
    unassigned_mem_writew,
2406
    unassigned_mem_writel,
2407
};
2408

    
2409
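/* write handlers for RAM pages that still contain translated code:
   invalidate the TBs on the page, perform the store, then restore the
   dirty bits (the slow path is removed once the code has been flushed) */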
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2410
                                uint32_t val)
2411
{
2412
    int dirty_flags;
2413
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2414
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2415
#if !defined(CONFIG_USER_ONLY)
2416
        tb_invalidate_phys_page_fast(ram_addr, 1);
2417
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2418
#endif
2419
    }
2420
    stb_p(phys_ram_base + ram_addr, val);
2421
#ifdef USE_KQEMU
2422
    if (cpu_single_env->kqemu_enabled &&
2423
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2424
        kqemu_modify_page(cpu_single_env, ram_addr);
2425
#endif
2426
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2427
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2428
    /* we remove the notdirty callback only if the code has been
2429
       flushed */
2430
    if (dirty_flags == 0xff)
2431
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2432
}
2433

    
2434
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2435
                                uint32_t val)
2436
{
2437
    int dirty_flags;
2438
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2439
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2440
#if !defined(CONFIG_USER_ONLY)
2441
        tb_invalidate_phys_page_fast(ram_addr, 2);
2442
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2443
#endif
2444
    }
2445
    stw_p(phys_ram_base + ram_addr, val);
2446
#ifdef USE_KQEMU
2447
    if (cpu_single_env->kqemu_enabled &&
2448
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2449
        kqemu_modify_page(cpu_single_env, ram_addr);
2450
#endif
2451
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2452
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2453
    /* we remove the notdirty callback only if the code has been
2454
       flushed */
2455
    if (dirty_flags == 0xff)
2456
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2457
}
2458

    
2459
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2460
                                uint32_t val)
2461
{
2462
    int dirty_flags;
2463
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2464
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2465
#if !defined(CONFIG_USER_ONLY)
2466
        tb_invalidate_phys_page_fast(ram_addr, 4);
2467
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2468
#endif
2469
    }
2470
    stl_p(phys_ram_base + ram_addr, val);
2471
#ifdef USE_KQEMU
2472
    if (cpu_single_env->kqemu_enabled &&
2473
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2474
        kqemu_modify_page(cpu_single_env, ram_addr);
2475
#endif
2476
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2477
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2478
    /* we remove the notdirty callback only if the code has been
2479
       flushed */
2480
    if (dirty_flags == 0xff)
2481
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2482
}
2483

    
2484
static CPUReadMemoryFunc *error_mem_read[3] = {
2485
    NULL, /* never used */
2486
    NULL, /* never used */
2487
    NULL, /* never used */
2488
};
2489

    
2490
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2491
    notdirty_mem_writeb,
2492
    notdirty_mem_writew,
2493
    notdirty_mem_writel,
2494
};
2495

    
2496
/* Generate a debug exception if a watchpoint has been hit.  */
2497
static void check_watchpoint(int offset, int flags)
2498
{
2499
    CPUState *env = cpu_single_env;
2500
    target_ulong vaddr;
2501
    CPUWatchpoint *wp;
2502

    
2503
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2504
    for (wp = env->watchpoints; wp != NULL; wp = wp->next) {
2505
        if (vaddr == wp->vaddr && (wp->flags & flags)) {
2506
            env->watchpoint_hit = wp;
2507
            cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2508
            break;
2509
        }
2510
    }
2511
}
2512

    
2513
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2514
   so these check for a hit then pass through to the normal out-of-line
2515
   phys routines.  */
2516
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2517
{
2518
    check_watchpoint(addr & ~TARGET_PAGE_MASK, BP_MEM_READ);
2519
    return ldub_phys(addr);
2520
}
2521

    
2522
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2523
{
2524
    check_watchpoint(addr & ~TARGET_PAGE_MASK, BP_MEM_READ);
2525
    return lduw_phys(addr);
2526
}
2527

    
2528
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2529
{
2530
    check_watchpoint(addr & ~TARGET_PAGE_MASK, BP_MEM_READ);
2531
    return ldl_phys(addr);
2532
}
2533

    
2534
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2535
                             uint32_t val)
2536
{
2537
    check_watchpoint(addr & ~TARGET_PAGE_MASK, BP_MEM_WRITE);
2538
    stb_phys(addr, val);
2539
}
2540

    
2541
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2542
                             uint32_t val)
2543
{
2544
    check_watchpoint(addr & ~TARGET_PAGE_MASK, BP_MEM_WRITE);
2545
    stw_phys(addr, val);
2546
}
2547

    
2548
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2549
                             uint32_t val)
2550
{
2551
    check_watchpoint(addr & ~TARGET_PAGE_MASK, BP_MEM_WRITE);
2552
    stl_phys(addr, val);
2553
}
2554

    
2555
static CPUReadMemoryFunc *watch_mem_read[3] = {
2556
    watch_mem_readb,
2557
    watch_mem_readw,
2558
    watch_mem_readl,
2559
};
2560

    
2561
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2562
    watch_mem_writeb,
2563
    watch_mem_writew,
2564
    watch_mem_writel,
2565
};
2566

    
2567
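/* forward a sub-page access to the handler registered for this offset;
   'len' is the access size index (0 = byte, 1 = word, 2 = long) */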
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2568
                                 unsigned int len)
2569
{
2570
    uint32_t ret;
2571
    unsigned int idx;
2572

    
2573
    idx = SUBPAGE_IDX(addr - mmio->base);
2574
#if defined(DEBUG_SUBPAGE)
2575
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2576
           mmio, len, addr, idx);
2577
#endif
2578
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);
2579

    
2580
    return ret;
2581
}
2582

    
2583
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2584
                              uint32_t value, unsigned int len)
2585
{
2586
    unsigned int idx;
2587

    
2588
    idx = SUBPAGE_IDX(addr - mmio->base);
2589
#if defined(DEBUG_SUBPAGE)
2590
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2591
           mmio, len, addr, idx, value);
2592
#endif
2593
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
2594
}
2595

    
2596
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2597
{
2598
#if defined(DEBUG_SUBPAGE)
2599
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2600
#endif
2601

    
2602
    return subpage_readlen(opaque, addr, 0);
2603
}
2604

    
2605
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2606
                            uint32_t value)
2607
{
2608
#if defined(DEBUG_SUBPAGE)
2609
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2610
#endif
2611
    subpage_writelen(opaque, addr, value, 0);
2612
}
2613

    
2614
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2615
{
2616
#if defined(DEBUG_SUBPAGE)
2617
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2618
#endif
2619

    
2620
    return subpage_readlen(opaque, addr, 1);
2621
}
2622

    
2623
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2624
                            uint32_t value)
2625
{
2626
#if defined(DEBUG_SUBPAGE)
2627
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2628
#endif
2629
    subpage_writelen(opaque, addr, value, 1);
2630
}
2631

    
2632
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2633
{
2634
#if defined(DEBUG_SUBPAGE)
2635
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2636
#endif
2637

    
2638
    return subpage_readlen(opaque, addr, 2);
2639
}
2640

    
2641
static void subpage_writel (void *opaque,
2642
                         target_phys_addr_t addr, uint32_t value)
2643
{
2644
#if defined(DEBUG_SUBPAGE)
2645
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2646
#endif
2647
    subpage_writelen(opaque, addr, value, 2);
2648
}
2649

    
2650
static CPUReadMemoryFunc *subpage_read[] = {
2651
    &subpage_readb,
2652
    &subpage_readw,
2653
    &subpage_readl,
2654
};
2655

    
2656
static CPUWriteMemoryFunc *subpage_write[] = {
2657
    &subpage_writeb,
2658
    &subpage_writew,
2659
    &subpage_writel,
2660
};
2661

    
2662
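/* map the sub-page offsets [start, end] onto the I/O handlers of 'memory' */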
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2663
                             ram_addr_t memory)
2664
{
2665
    int idx, eidx;
2666
    unsigned int i;
2667

    
2668
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2669
        return -1;
2670
    idx = SUBPAGE_IDX(start);
2671
    eidx = SUBPAGE_IDX(end);
2672
#if defined(DEBUG_SUBPAGE)
2673
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2674
           mmio, start, end, idx, eidx, memory);
2675
#endif
2676
    memory >>= IO_MEM_SHIFT;
2677
    for (; idx <= eidx; idx++) {
2678
        for (i = 0; i < 4; i++) {
2679
            if (io_mem_read[memory][i]) {
2680
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2681
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2682
            }
2683
            if (io_mem_write[memory][i]) {
2684
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2685
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2686
            }
2687
        }
2688
    }
2689

    
2690
    return 0;
2691
}
2692

    
2693
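/* allocate a sub-page descriptor for the page at 'base', point *phys at it
   and initialise the whole page with 'orig_memory' */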
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2694
                           ram_addr_t orig_memory)
2695
{
2696
    subpage_t *mmio;
2697
    int subpage_memory;
2698

    
2699
    mmio = qemu_mallocz(sizeof(subpage_t));
2700
    if (mmio != NULL) {
2701
        mmio->base = base;
2702
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2703
#if defined(DEBUG_SUBPAGE)
2704
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2705
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2706
#endif
2707
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2708
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
2709
    }
2710

    
2711
    return mmio;
2712
}
2713

    
2714
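/* register the fixed I/O slots (ROM, unassigned, not-dirty, watchpoints)
   and allocate the dirty bitmap */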
static void io_mem_init(void)
2715
{
2716
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2717
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2718
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2719
    io_mem_nb = 5;
2720

    
2721
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2722
                                          watch_mem_write, NULL);
2723
    /* alloc dirty bits array */
2724
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2725
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2726
}
2727

    
2728
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
2736
int cpu_register_io_memory(int io_index,
2737
                           CPUReadMemoryFunc **mem_read,
2738
                           CPUWriteMemoryFunc **mem_write,
2739
                           void *opaque)
2740
{
2741
    int i, subwidth = 0;
2742

    
2743
    if (io_index <= 0) {
2744
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2745
            return -1;
2746
        io_index = io_mem_nb++;
2747
    } else {
2748
        if (io_index >= IO_MEM_NB_ENTRIES)
2749
            return -1;
2750
    }
2751

    
2752
    for(i = 0;i < 3; i++) {
2753
        if (!mem_read[i] || !mem_write[i])
2754
            subwidth = IO_MEM_SUBWIDTH;
2755
        io_mem_read[io_index][i] = mem_read[i];
2756
        io_mem_write[io_index][i] = mem_write[i];
2757
    }
2758
    io_mem_opaque[io_index] = opaque;
2759
    return (io_index << IO_MEM_SHIFT) | subwidth;
2760
}
2761

    
2762
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2763
{
2764
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2765
}
2766

    
2767
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2768
{
2769
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2770
}
2771

    
2772
#endif /* !defined(CONFIG_USER_ONLY) */
2773

    
2774
/* physical memory access (slow version, mainly for debug) */
2775
#if defined(CONFIG_USER_ONLY)
2776
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2777
                            int len, int is_write)
2778
{
2779
    int l, flags;
2780
    target_ulong page;
2781
    void * p;
2782

    
2783
    while (len > 0) {
2784
        page = addr & TARGET_PAGE_MASK;
2785
        l = (page + TARGET_PAGE_SIZE) - addr;
2786
        if (l > len)
2787
            l = len;
2788
        flags = page_get_flags(page);
2789
        if (!(flags & PAGE_VALID))
2790
            return;
2791
        if (is_write) {
2792
            if (!(flags & PAGE_WRITE))
2793
                return;
2794
            /* XXX: this code should not depend on lock_user */
2795
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2796
                /* FIXME - should this return an error rather than just fail? */
2797
                return;
2798
            memcpy(p, buf, l);
2799
            unlock_user(p, addr, l);
2800
        } else {
2801
            if (!(flags & PAGE_READ))
2802
                return;
2803
            /* XXX: this code should not depend on lock_user */
2804
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2805
                /* FIXME - should this return an error rather than just fail? */
2806
                return;
2807
            memcpy(buf, p, l);
2808
            unlock_user(p, addr, 0);
2809
        }
2810
        len -= l;
2811
        buf += l;
2812
        addr += l;
2813
    }
2814
}
2815

    
2816
#else
2817
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2818
                            int len, int is_write)
2819
{
2820
    int l, io_index;
2821
    uint8_t *ptr;
2822
    uint32_t val;
2823
    target_phys_addr_t page;
2824
    unsigned long pd;
2825
    PhysPageDesc *p;
2826

    
2827
    while (len > 0) {
2828
        page = addr & TARGET_PAGE_MASK;
2829
        l = (page + TARGET_PAGE_SIZE) - addr;
2830
        if (l > len)
2831
            l = len;
2832
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2833
        if (!p) {
2834
            pd = IO_MEM_UNASSIGNED;
2835
        } else {
2836
            pd = p->phys_offset;
2837
        }
2838

    
2839
        if (is_write) {
2840
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2841
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2842
                /* XXX: could force cpu_single_env to NULL to avoid
2843
                   potential bugs */
2844
                if (l >= 4 && ((addr & 3) == 0)) {
2845
                    /* 32 bit write access */
2846
                    val = ldl_p(buf);
2847
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2848
                    l = 4;
2849
                } else if (l >= 2 && ((addr & 1) == 0)) {
2850
                    /* 16 bit write access */
2851
                    val = lduw_p(buf);
2852
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2853
                    l = 2;
2854
                } else {
2855
                    /* 8 bit write access */
2856
                    val = ldub_p(buf);
2857
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2858
                    l = 1;
2859
                }
2860
            } else {
2861
                unsigned long addr1;
2862
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2863
                /* RAM case */
2864
                ptr = phys_ram_base + addr1;
2865
                memcpy(ptr, buf, l);
2866
                if (!cpu_physical_memory_is_dirty(addr1)) {
2867
                    /* invalidate code */
2868
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2869
                    /* set dirty bit */
2870
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2871
                        (0xff & ~CODE_DIRTY_FLAG);
2872
                }
2873
            }
2874
        } else {
2875
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2876
                !(pd & IO_MEM_ROMD)) {
2877
                /* I/O case */
2878
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2879
                if (l >= 4 && ((addr & 3) == 0)) {
2880
                    /* 32 bit read access */
2881
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2882
                    stl_p(buf, val);
2883
                    l = 4;
2884
                } else if (l >= 2 && ((addr & 1) == 0)) {
2885
                    /* 16 bit read access */
2886
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2887
                    stw_p(buf, val);
2888
                    l = 2;
2889
                } else {
2890
                    /* 8 bit read access */
2891
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2892
                    stb_p(buf, val);
2893
                    l = 1;
2894
                }
2895
            } else {
2896
                /* RAM case */
2897
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2898
                    (addr & ~TARGET_PAGE_MASK);
2899
                memcpy(buf, ptr, l);
2900
            }
2901
        }
2902
        len -= l;
2903
        buf += l;
2904
        addr += l;
2905
    }
2906
}
2907

    
2908
/* used for ROM loading: can write in RAM and ROM */
2909
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
2910
                                   const uint8_t *buf, int len)
2911
{
2912
    int l;
2913
    uint8_t *ptr;
2914
    target_phys_addr_t page;
2915
    unsigned long pd;
2916
    PhysPageDesc *p;
2917

    
2918
    while (len > 0) {
2919
        page = addr & TARGET_PAGE_MASK;
2920
        l = (page + TARGET_PAGE_SIZE) - addr;
2921
        if (l > len)
2922
            l = len;
2923
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2924
        if (!p) {
2925
            pd = IO_MEM_UNASSIGNED;
2926
        } else {
2927
            pd = p->phys_offset;
2928
        }
2929

    
2930
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
2931
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
2932
            !(pd & IO_MEM_ROMD)) {
2933
            /* do nothing */
2934
        } else {
2935
            unsigned long addr1;
2936
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2937
            /* ROM/RAM case */
2938
            ptr = phys_ram_base + addr1;
2939
            memcpy(ptr, buf, l);
2940
        }
2941
        len -= l;
2942
        buf += l;
2943
        addr += l;
2944
    }
2945
}
2946

    
2947

    
2948
/* warning: addr must be aligned */
2949
uint32_t ldl_phys(target_phys_addr_t addr)
2950
{
2951
    int io_index;
2952
    uint8_t *ptr;
2953
    uint32_t val;
2954
    unsigned long pd;
2955
    PhysPageDesc *p;
2956

    
2957
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2958
    if (!p) {
2959
        pd = IO_MEM_UNASSIGNED;
2960
    } else {
2961
        pd = p->phys_offset;
2962
    }
2963

    
2964
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2965
        !(pd & IO_MEM_ROMD)) {
2966
        /* I/O case */
2967
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2968
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2969
    } else {
2970
        /* RAM case */
2971
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2972
            (addr & ~TARGET_PAGE_MASK);
2973
        val = ldl_p(ptr);
2974
    }
2975
    return val;
2976
}
2977

    
2978
/* warning: addr must be aligned */
2979
uint64_t ldq_phys(target_phys_addr_t addr)
2980
{
2981
    int io_index;
2982
    uint8_t *ptr;
2983
    uint64_t val;
2984
    unsigned long pd;
2985
    PhysPageDesc *p;
2986

    
2987
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2988
    if (!p) {
2989
        pd = IO_MEM_UNASSIGNED;
2990
    } else {
2991
        pd = p->phys_offset;
2992
    }
2993

    
2994
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2995
        !(pd & IO_MEM_ROMD)) {
2996
        /* I/O case */
2997
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2998
#ifdef TARGET_WORDS_BIGENDIAN
2999
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3000
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3001
#else
3002
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3003
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3004
#endif
3005
    } else {
3006
        /* RAM case */
3007
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3008
            (addr & ~TARGET_PAGE_MASK);
3009
        val = ldq_p(ptr);
3010
    }
3011
    return val;
3012
}
3013

    
3014
/* XXX: optimize */
3015
uint32_t ldub_phys(target_phys_addr_t addr)
3016
{
3017
    uint8_t val;
3018
    cpu_physical_memory_read(addr, &val, 1);
3019
    return val;
3020
}
3021

    
3022
/* XXX: optimize */
3023
uint32_t lduw_phys(target_phys_addr_t addr)
3024
{
3025
    uint16_t val;
3026
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3027
    return tswap16(val);
3028
}
3029

    
3030
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
3033
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
3034
{
3035
    int io_index;
3036
    uint8_t *ptr;
3037
    unsigned long pd;
3038
    PhysPageDesc *p;
3039

    
3040
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3041
    if (!p) {
3042
        pd = IO_MEM_UNASSIGNED;
3043
    } else {
3044
        pd = p->phys_offset;
3045
    }
3046

    
3047
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3048
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3049
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3050
    } else {
3051
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3052
        ptr = phys_ram_base + addr1;
3053
        stl_p(ptr, val);
3054

    
3055
        if (unlikely(in_migration)) {
3056
            if (!cpu_physical_memory_is_dirty(addr1)) {
3057
                /* invalidate code */
3058
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3059
                /* set dirty bit */
3060
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3061
                    (0xff & ~CODE_DIRTY_FLAG);
3062
            }
3063
        }
3064
    }
3065
}
3066

    
3067
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3068
{
3069
    int io_index;
3070
    uint8_t *ptr;
3071
    unsigned long pd;
3072
    PhysPageDesc *p;
3073

    
3074
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3075
    if (!p) {
3076
        pd = IO_MEM_UNASSIGNED;
3077
    } else {
3078
        pd = p->phys_offset;
3079
    }
3080

    
3081
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3082
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3083
#ifdef TARGET_WORDS_BIGENDIAN
3084
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3085
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3086
#else
3087
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3088
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3089
#endif
3090
    } else {
3091
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3092
            (addr & ~TARGET_PAGE_MASK);
3093
        stq_p(ptr, val);
3094
    }
3095
}
3096

    
3097
/* warning: addr must be aligned */
3098
void stl_phys(target_phys_addr_t addr, uint32_t val)
3099
{
3100
    int io_index;
3101
    uint8_t *ptr;
3102
    unsigned long pd;
3103
    PhysPageDesc *p;
3104

    
3105
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3106
    if (!p) {
3107
        pd = IO_MEM_UNASSIGNED;
3108
    } else {
3109
        pd = p->phys_offset;
3110
    }
3111

    
3112
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3113
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3114
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3115
    } else {
3116
        unsigned long addr1;
3117
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3118
        /* RAM case */
3119
        ptr = phys_ram_base + addr1;
3120
        stl_p(ptr, val);
3121
        if (!cpu_physical_memory_is_dirty(addr1)) {
3122
            /* invalidate code */
3123
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3124
            /* set dirty bit */
3125
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3126
                (0xff & ~CODE_DIRTY_FLAG);
3127
        }
3128
    }
3129
}
3130

    
3131
/* XXX: optimize */
3132
void stb_phys(target_phys_addr_t addr, uint32_t val)
3133
{
3134
    uint8_t v = val;
3135
    cpu_physical_memory_write(addr, &v, 1);
3136
}
3137

    
3138
/* XXX: optimize */
3139
void stw_phys(target_phys_addr_t addr, uint32_t val)
3140
{
3141
    uint16_t v = tswap16(val);
3142
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3143
}
3144

    
3145
/* XXX: optimize */
3146
void stq_phys(target_phys_addr_t addr, uint64_t val)
3147
{
3148
    val = tswap64(val);
3149
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3150
}
3151

    
3152
#endif
3153

    
3154
/* virtual memory access for debug */
3155
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3156
                        uint8_t *buf, int len, int is_write)
3157
{
3158
    int l;
3159
    target_phys_addr_t phys_addr;
3160
    target_ulong page;
3161

    
3162
    while (len > 0) {
3163
        page = addr & TARGET_PAGE_MASK;
3164
        phys_addr = cpu_get_phys_page_debug(env, page);
3165
        /* if no physical page mapped, return an error */
3166
        if (phys_addr == -1)
3167
            return -1;
3168
        l = (page + TARGET_PAGE_SIZE) - addr;
3169
        if (l > len)
3170
            l = len;
3171
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
3172
                               buf, l, is_write);
3173
        len -= l;
3174
        buf += l;
3175
        addr += l;
3176
    }
3177
    return 0;
3178
}
3179

    
3180
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

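/* Dump statistics about the dynamic translation cache: generated code
   size, TB counts and sizes, cross-page and direct-jump TBs, plus the
   flush/invalidate counters and TCG statistics. */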
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

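/* Instantiate the softmmu templates once more with SOFTMMU_CODE_ACCESS
   defined, producing the "_cmmu" (code access) helpers; SHIFT 0..3
   select the 1, 2, 4 and 8 byte access sizes. */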
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif