/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include "config.h"
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

47
//#define DEBUG_TB_INVALIDATE
48
//#define DEBUG_FLUSH
49
//#define DEBUG_TLB
50
//#define DEBUG_UNASSIGNED
51

    
52
/* make various TB consistency checks */
53
//#define DEBUG_TB_CHECK
54
//#define DEBUG_TLB_CHECK
55

    
56
//#define DEBUG_IOPORT
57
//#define DEBUG_SUBPAGE
58

    
59
#if !defined(CONFIG_USER_ONLY)
60
/* TB consistency checks only implemented for usermode emulation.  */
61
#undef DEBUG_TB_CHECK
62
#endif
63

    
64
#define SMC_BITMAP_USE_THRESHOLD 10
65

    
66
#define MMAP_AREA_START        0x00000000
67
#define MMAP_AREA_END          0xa8000000
68

    
69
#if defined(TARGET_SPARC64)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 41
71
#elif defined(TARGET_SPARC)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 36
73
#elif defined(TARGET_ALPHA)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#define TARGET_VIRT_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_PPC64)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 42
78
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
79
#define TARGET_PHYS_ADDR_SPACE_BITS 42
80
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
81
#define TARGET_PHYS_ADDR_SPACE_BITS 36
82
#else
83
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
84
#define TARGET_PHYS_ADDR_SPACE_BITS 32
85
#endif
86

    
87
static TranslationBlock *tbs;
88
int code_gen_max_blocks;
89
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
90
static int nb_tbs;
91
/* any access to the tbs or the page table must use this lock */
92
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
93

    
94
#if defined(__arm__) || defined(__sparc_v9__)
95
/* The prologue must be reachable with a direct jump. ARM and Sparc64
96
 have limited branch ranges (possibly also PPC) so place it in a
97
 section close to the code segment. */
98
#define code_gen_section                                \
99
    __attribute__((__section__(".gen_code")))           \
100
    __attribute__((aligned (32)))
101
#else
102
#define code_gen_section                                \
103
    __attribute__((aligned (32)))
104
#endif
105

    
106
uint8_t code_gen_prologue[1024] code_gen_section;
107
static uint8_t *code_gen_buffer;
108
static unsigned long code_gen_buffer_size;
109
/* threshold to flush the translated code buffer */
110
static unsigned long code_gen_buffer_max_size;
111
uint8_t *code_gen_ptr;
112

    
113
#if !defined(CONFIG_USER_ONLY)
114
ram_addr_t phys_ram_size;
115
int phys_ram_fd;
116
uint8_t *phys_ram_base;
117
uint8_t *phys_ram_dirty;
118
static int in_migration;
119
static ram_addr_t phys_ram_alloc_offset = 0;
120
#endif
121

    
122
CPUState *first_cpu;
123
/* current CPU in the current thread. It is only valid inside
124
   cpu_exec() */
125
CPUState *cpu_single_env;
126
/* 0 = Do not count executed instructions.
127
   1 = Precise instruction counting.
128
   2 = Adaptive rate instruction counting.  */
129
int use_icount = 0;
130
/* Current instruction counter.  While executing translated code this may
131
   include some instructions that have not yet been executed.  */
132
int64_t qemu_icount;
133

    
134
typedef struct PageDesc {
135
    /* list of TBs intersecting this ram page */
136
    TranslationBlock *first_tb;
137
    /* in order to optimize self modifying code, we count the number
138
       of lookups we do to a given page to use a bitmap */
139
    unsigned int code_write_count;
140
    uint8_t *code_bitmap;
141
#if defined(CONFIG_USER_ONLY)
142
    unsigned long flags;
143
#endif
144
} PageDesc;
145

    
146
typedef struct PhysPageDesc {
147
    /* offset in host memory of the page + io_index in the low bits */
148
    ram_addr_t phys_offset;
149
    ram_addr_t region_offset;
150
} PhysPageDesc;
151

    
152
#define L2_BITS 10
153
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
154
/* XXX: this is a temporary hack for alpha target.
155
 *      In the future, this is to be replaced by a multi-level table
156
 *      to actually be able to handle the complete 64 bits address space.
157
 */
158
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
159
#else
160
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
161
#endif
162

    
163
#define L1_SIZE (1 << L1_BITS)
164
#define L2_SIZE (1 << L2_BITS)
165

    
166
unsigned long qemu_real_host_page_size;
167
unsigned long qemu_host_page_bits;
168
unsigned long qemu_host_page_size;
169
unsigned long qemu_host_page_mask;
170

    
171
/* XXX: for system emulation, it could just be an array */
172
static PageDesc *l1_map[L1_SIZE];
173
static PhysPageDesc **l1_phys_map;
174

    
175
#if !defined(CONFIG_USER_ONLY)
176
static void io_mem_init(void);
177

    
178
/* io memory support */
179
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
180
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
181
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
182
static int io_mem_nb;
183
static int io_mem_watch;
184
#endif
185

    
186
/* log support */
187
static const char *logfilename = "/tmp/qemu.log";
188
FILE *logfile;
189
int loglevel;
190
static int log_append = 0;
191

    
192
/* statistics */
193
static int tlb_flush_count;
194
static int tb_flush_count;
195
static int tb_phys_invalidate_count;
196

    
197
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
198
typedef struct subpage_t {
199
    target_phys_addr_t base;
200
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
201
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
202
    void *opaque[TARGET_PAGE_SIZE][2][4];
203
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
204
} subpage_t;
205

    
206
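/* make a range of host memory executable (and readable/writable); the
   POSIX variant rounds the range out to host page boundaries before
   calling mprotect() */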
#ifdef _WIN32
207
static void map_exec(void *addr, long size)
208
{
209
    DWORD old_protect;
210
    VirtualProtect(addr, size,
211
                   PAGE_EXECUTE_READWRITE, &old_protect);
212
    
213
}
214
#else
215
static void map_exec(void *addr, long size)
216
{
217
    unsigned long start, end, page_size;
218
    
219
    page_size = getpagesize();
220
    start = (unsigned long)addr;
221
    start &= ~(page_size - 1);
222
    
223
    end = (unsigned long)addr + size;
224
    end += page_size - 1;
225
    end &= ~(page_size - 1);
226
    
227
    mprotect((void *)start, end - start,
228
             PROT_READ | PROT_WRITE | PROT_EXEC);
229
}
230
#endif
231

    
232
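/* determine the host page size, derive the qemu_host_page_* globals,
   allocate the first level of the physical page map and, for user mode,
   mark the host mappings listed in /proc/self/maps as reserved */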
static void page_init(void)
233
{
234
    /* NOTE: we can always suppose that qemu_host_page_size >=
235
       TARGET_PAGE_SIZE */
236
#ifdef _WIN32
237
    {
238
        SYSTEM_INFO system_info;
239

    
240
        GetSystemInfo(&system_info);
241
        qemu_real_host_page_size = system_info.dwPageSize;
242
    }
243
#else
244
    qemu_real_host_page_size = getpagesize();
245
#endif
246
    if (qemu_host_page_size == 0)
247
        qemu_host_page_size = qemu_real_host_page_size;
248
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
249
        qemu_host_page_size = TARGET_PAGE_SIZE;
250
    qemu_host_page_bits = 0;
251
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
252
        qemu_host_page_bits++;
253
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
254
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
255
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
256

    
257
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
258
    {
259
        long long startaddr, endaddr;
260
        FILE *f;
261
        int n;
262

    
263
        mmap_lock();
264
        last_brk = (unsigned long)sbrk(0);
265
        f = fopen("/proc/self/maps", "r");
266
        if (f) {
267
            do {
268
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
269
                if (n == 2) {
270
                    startaddr = MIN(startaddr,
271
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
272
                    endaddr = MIN(endaddr,
273
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
274
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
275
                                   TARGET_PAGE_ALIGN(endaddr),
276
                                   PAGE_RESERVED); 
277
                }
278
            } while (!feof(f));
279
            fclose(f);
280
        }
281
        mmap_unlock();
282
    }
283
#endif
284
}
285

    
286
static inline PageDesc **page_l1_map(target_ulong index)
287
{
288
#if TARGET_LONG_BITS > 32
289
    /* Host memory outside guest VM.  For 32-bit targets we have already
290
       excluded high addresses.  */
291
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
292
        return NULL;
293
#endif
294
    return &l1_map[index >> L2_BITS];
295
}
296

    
297
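/* return the PageDesc for the given target page index, allocating the
   second-level table on first use (mmap is used in user mode so that we
   do not recurse into qemu_malloc) */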
static inline PageDesc *page_find_alloc(target_ulong index)
298
{
299
    PageDesc **lp, *p;
300
    lp = page_l1_map(index);
301
    if (!lp)
302
        return NULL;
303

    
304
    p = *lp;
305
    if (!p) {
306
        /* allocate if not found */
307
#if defined(CONFIG_USER_ONLY)
308
        size_t len = sizeof(PageDesc) * L2_SIZE;
309
        /* Don't use qemu_malloc because it may recurse.  */
310
        p = mmap(0, len, PROT_READ | PROT_WRITE,
311
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
312
        *lp = p;
313
        if (h2g_valid(p)) {
314
            unsigned long addr = h2g(p);
315
            page_set_flags(addr & TARGET_PAGE_MASK,
316
                           TARGET_PAGE_ALIGN(addr + len),
317
                           PAGE_RESERVED); 
318
        }
319
#else
320
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
321
        *lp = p;
322
#endif
323
    }
324
    return p + (index & (L2_SIZE - 1));
325
}
326

    
327
static inline PageDesc *page_find(target_ulong index)
328
{
329
    PageDesc **lp, *p;
330
    lp = page_l1_map(index);
331
    if (!lp)
332
        return NULL;
333

    
334
    p = *lp;
335
    if (!p)
336
        return 0;
337
    return p + (index & (L2_SIZE - 1));
338
}
339

    
340
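/* look up the PhysPageDesc for a physical page index; if 'alloc' is zero,
   return NULL for missing entries instead of allocating them */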
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
341
{
342
    void **lp, **p;
343
    PhysPageDesc *pd;
344

    
345
    p = (void **)l1_phys_map;
346
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
347

    
348
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
349
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
350
#endif
351
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
352
    p = *lp;
353
    if (!p) {
354
        /* allocate if not found */
355
        if (!alloc)
356
            return NULL;
357
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
358
        memset(p, 0, sizeof(void *) * L1_SIZE);
359
        *lp = p;
360
    }
361
#endif
362
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
363
    pd = *lp;
364
    if (!pd) {
365
        int i;
366
        /* allocate if not found */
367
        if (!alloc)
368
            return NULL;
369
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
370
        *lp = pd;
371
        for (i = 0; i < L2_SIZE; i++)
372
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
373
    }
374
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
375
}
376

    
377
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
378
{
379
    return phys_page_find_alloc(index, 0);
380
}
381

    
382
#if !defined(CONFIG_USER_ONLY)
383
static void tlb_protect_code(ram_addr_t ram_addr);
384
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
385
                                    target_ulong vaddr);
386
#define mmap_lock() do { } while(0)
387
#define mmap_unlock() do { } while(0)
388
#endif
389

    
390
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
391

    
392
#if defined(CONFIG_USER_ONLY)
393
/* Currently it is not recommended to allocate big chunks of data in
394
   user mode. This will change when a dedicated libc is used. */
395
#define USE_STATIC_CODE_GEN_BUFFER
396
#endif
397

    
398
#ifdef USE_STATIC_CODE_GEN_BUFFER
399
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
400
#endif
401

    
402
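/* allocate the buffer that receives translated host code: either the
   static buffer or an mmap()ed region placed to satisfy the host's
   branch range constraints, then size the TB array accordingly */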
static void code_gen_alloc(unsigned long tb_size)
403
{
404
#ifdef USE_STATIC_CODE_GEN_BUFFER
405
    code_gen_buffer = static_code_gen_buffer;
406
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
407
    map_exec(code_gen_buffer, code_gen_buffer_size);
408
#else
409
    code_gen_buffer_size = tb_size;
410
    if (code_gen_buffer_size == 0) {
411
#if defined(CONFIG_USER_ONLY)
412
        /* in user mode, phys_ram_size is not meaningful */
413
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
414
#else
415
        /* XXX: needs adjustments */
416
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
417
#endif
418
    }
419
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
420
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
421
    /* The code gen buffer location may have constraints depending on
422
       the host cpu and OS */
423
#if defined(__linux__) 
424
    {
425
        int flags;
426
        void *start = NULL;
427

    
428
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
429
#if defined(__x86_64__)
430
        flags |= MAP_32BIT;
431
        /* Cannot map more than that */
432
        if (code_gen_buffer_size > (800 * 1024 * 1024))
433
            code_gen_buffer_size = (800 * 1024 * 1024);
434
#elif defined(__sparc_v9__)
435
        // Map the buffer below 2G, so we can use direct calls and branches
436
        flags |= MAP_FIXED;
437
        start = (void *) 0x60000000UL;
438
        if (code_gen_buffer_size > (512 * 1024 * 1024))
439
            code_gen_buffer_size = (512 * 1024 * 1024);
440
#elif defined(__arm__)
441
        /* Map the buffer below 32M, so we can use direct calls and branches */
442
        flags |= MAP_FIXED;
443
        start = (void *) 0x01000000UL;
444
        if (code_gen_buffer_size > 16 * 1024 * 1024)
445
            code_gen_buffer_size = 16 * 1024 * 1024;
446
#endif
447
        code_gen_buffer = mmap(start, code_gen_buffer_size,
448
                               PROT_WRITE | PROT_READ | PROT_EXEC,
449
                               flags, -1, 0);
450
        if (code_gen_buffer == MAP_FAILED) {
451
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
452
            exit(1);
453
        }
454
    }
455
#elif defined(__FreeBSD__)
456
    {
457
        int flags;
458
        void *addr = NULL;
459
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
460
#if defined(__x86_64__)
461
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
462
         * 0x40000000 is free */
463
        flags |= MAP_FIXED;
464
        addr = (void *)0x40000000;
465
        /* Cannot map more than that */
466
        if (code_gen_buffer_size > (800 * 1024 * 1024))
467
            code_gen_buffer_size = (800 * 1024 * 1024);
468
#endif
469
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
470
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
471
                               flags, -1, 0);
472
        if (code_gen_buffer == MAP_FAILED) {
473
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
474
            exit(1);
475
        }
476
    }
477
#else
478
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
479
    if (!code_gen_buffer) {
480
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
481
        exit(1);
482
    }
483
    map_exec(code_gen_buffer, code_gen_buffer_size);
484
#endif
485
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
486
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
487
    code_gen_buffer_max_size = code_gen_buffer_size - 
488
        code_gen_max_block_size();
489
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
490
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
491
}
492

    
493
/* Must be called before using the QEMU cpus. 'tb_size' is the size
494
   (in bytes) allocated to the translation buffer. Zero means default
495
   size. */
496
void cpu_exec_init_all(unsigned long tb_size)
497
{
498
    cpu_gen_init();
499
    code_gen_alloc(tb_size);
500
    code_gen_ptr = code_gen_buffer;
501
    page_init();
502
#if !defined(CONFIG_USER_ONLY)
503
    io_mem_init();
504
#endif
505
}
506

    
507
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
508

    
509
#define CPU_COMMON_SAVE_VERSION 1
510

    
511
static void cpu_common_save(QEMUFile *f, void *opaque)
512
{
513
    CPUState *env = opaque;
514

    
515
    qemu_put_be32s(f, &env->halted);
516
    qemu_put_be32s(f, &env->interrupt_request);
517
}
518

    
519
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
520
{
521
    CPUState *env = opaque;
522

    
523
    if (version_id != CPU_COMMON_SAVE_VERSION)
524
        return -EINVAL;
525

    
526
    qemu_get_be32s(f, &env->halted);
527
    qemu_get_be32s(f, &env->interrupt_request);
528
    tlb_flush(env, 1);
529

    
530
    return 0;
531
}
532
#endif
533

    
534
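/* register a new virtual CPU: assign it the next cpu_index, append it to
   the global CPU list, initialize its breakpoint and watchpoint lists and,
   for system emulation, register its savevm handlers */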
void cpu_exec_init(CPUState *env)
535
{
536
    CPUState **penv;
537
    int cpu_index;
538

    
539
    env->next_cpu = NULL;
540
    penv = &first_cpu;
541
    cpu_index = 0;
542
    while (*penv != NULL) {
543
        penv = (CPUState **)&(*penv)->next_cpu;
544
        cpu_index++;
545
    }
546
    env->cpu_index = cpu_index;
547
    TAILQ_INIT(&env->breakpoints);
548
    TAILQ_INIT(&env->watchpoints);
549
    *penv = env;
550
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
551
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
552
                    cpu_common_save, cpu_common_load, env);
553
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
554
                    cpu_save, cpu_load, env);
555
#endif
556
}
557

    
558
static inline void invalidate_page_bitmap(PageDesc *p)
559
{
560
    if (p->code_bitmap) {
561
        qemu_free(p->code_bitmap);
562
        p->code_bitmap = NULL;
563
    }
564
    p->code_write_count = 0;
565
}
566

    
567
/* set to NULL all the 'first_tb' fields in all PageDescs */
568
static void page_flush_tb(void)
569
{
570
    int i, j;
571
    PageDesc *p;
572

    
573
    for(i = 0; i < L1_SIZE; i++) {
574
        p = l1_map[i];
575
        if (p) {
576
            for(j = 0; j < L2_SIZE; j++) {
577
                p->first_tb = NULL;
578
                invalidate_page_bitmap(p);
579
                p++;
580
            }
581
        }
582
    }
583
}
584

    
585
/* flush all the translation blocks */
586
/* XXX: tb_flush is currently not thread safe */
587
void tb_flush(CPUState *env1)
588
{
589
    CPUState *env;
590
#if defined(DEBUG_FLUSH)
591
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
592
           (unsigned long)(code_gen_ptr - code_gen_buffer),
593
           nb_tbs, nb_tbs > 0 ?
594
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
595
#endif
596
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
597
        cpu_abort(env1, "Internal error: code buffer overflow\n");
598

    
599
    nb_tbs = 0;
600

    
601
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
602
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
603
    }
604

    
605
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
606
    page_flush_tb();
607

    
608
    code_gen_ptr = code_gen_buffer;
609
    /* XXX: flush processor icache at this point if cache flush is
610
       expensive */
611
    tb_flush_count++;
612
}
613

    
614
#ifdef DEBUG_TB_CHECK
615

    
616
static void tb_invalidate_check(target_ulong address)
617
{
618
    TranslationBlock *tb;
619
    int i;
620
    address &= TARGET_PAGE_MASK;
621
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
622
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
623
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
624
                  address >= tb->pc + tb->size)) {
625
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
626
                       address, (long)tb->pc, tb->size);
627
            }
628
        }
629
    }
630
}
631

    
632
/* verify that all the pages have correct rights for code */
633
static void tb_page_check(void)
634
{
635
    TranslationBlock *tb;
636
    int i, flags1, flags2;
637

    
638
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
639
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
640
            flags1 = page_get_flags(tb->pc);
641
            flags2 = page_get_flags(tb->pc + tb->size - 1);
642
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
643
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
644
                       (long)tb->pc, tb->size, flags1, flags2);
645
            }
646
        }
647
    }
648
}
649

    
650
static void tb_jmp_check(TranslationBlock *tb)
651
{
652
    TranslationBlock *tb1;
653
    unsigned int n1;
654

    
655
    /* suppress any remaining jumps to this TB */
656
    tb1 = tb->jmp_first;
657
    for(;;) {
658
        n1 = (long)tb1 & 3;
659
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
660
        if (n1 == 2)
661
            break;
662
        tb1 = tb1->jmp_next[n1];
663
    }
664
    /* check end of list */
665
    if (tb1 != tb) {
666
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
667
    }
668
}
669

    
670
#endif
671

    
672
/* invalidate one TB */
673
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
674
                             int next_offset)
675
{
676
    TranslationBlock *tb1;
677
    for(;;) {
678
        tb1 = *ptb;
679
        if (tb1 == tb) {
680
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
681
            break;
682
        }
683
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
684
    }
685
}
686

    
687
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
688
{
689
    TranslationBlock *tb1;
690
    unsigned int n1;
691

    
692
    for(;;) {
693
        tb1 = *ptb;
694
        n1 = (long)tb1 & 3;
695
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
696
        if (tb1 == tb) {
697
            *ptb = tb1->page_next[n1];
698
            break;
699
        }
700
        ptb = &tb1->page_next[n1];
701
    }
702
}
703

    
704
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
705
{
706
    TranslationBlock *tb1, **ptb;
707
    unsigned int n1;
708

    
709
    ptb = &tb->jmp_next[n];
710
    tb1 = *ptb;
711
    if (tb1) {
712
        /* find tb(n) in circular list */
713
        for(;;) {
714
            tb1 = *ptb;
715
            n1 = (long)tb1 & 3;
716
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
717
            if (n1 == n && tb1 == tb)
718
                break;
719
            if (n1 == 2) {
720
                ptb = &tb1->jmp_first;
721
            } else {
722
                ptb = &tb1->jmp_next[n1];
723
            }
724
        }
725
        /* now we can suppress tb(n) from the list */
726
        *ptb = tb->jmp_next[n];
727

    
728
        tb->jmp_next[n] = NULL;
729
    }
730
}
731

    
732
/* reset the jump entry 'n' of a TB so that it is not chained to
733
   another TB */
734
static inline void tb_reset_jump(TranslationBlock *tb, int n)
735
{
736
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
737
}
738

    
739
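/* remove a TB from the physical hash table and from the page lists of the
   pages it covers, drop it from every CPU's tb_jmp_cache and unchain any
   TBs that jump into it */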
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
740
{
741
    CPUState *env;
742
    PageDesc *p;
743
    unsigned int h, n1;
744
    target_phys_addr_t phys_pc;
745
    TranslationBlock *tb1, *tb2;
746

    
747
    /* remove the TB from the hash list */
748
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
749
    h = tb_phys_hash_func(phys_pc);
750
    tb_remove(&tb_phys_hash[h], tb,
751
              offsetof(TranslationBlock, phys_hash_next));
752

    
753
    /* remove the TB from the page list */
754
    if (tb->page_addr[0] != page_addr) {
755
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
756
        tb_page_remove(&p->first_tb, tb);
757
        invalidate_page_bitmap(p);
758
    }
759
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
760
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
761
        tb_page_remove(&p->first_tb, tb);
762
        invalidate_page_bitmap(p);
763
    }
764

    
765
    tb_invalidated_flag = 1;
766

    
767
    /* remove the TB from the hash list */
768
    h = tb_jmp_cache_hash_func(tb->pc);
769
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
770
        if (env->tb_jmp_cache[h] == tb)
771
            env->tb_jmp_cache[h] = NULL;
772
    }
773

    
774
    /* suppress this TB from the two jump lists */
775
    tb_jmp_remove(tb, 0);
776
    tb_jmp_remove(tb, 1);
777

    
778
    /* suppress any remaining jumps to this TB */
779
    tb1 = tb->jmp_first;
780
    for(;;) {
781
        n1 = (long)tb1 & 3;
782
        if (n1 == 2)
783
            break;
784
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
785
        tb2 = tb1->jmp_next[n1];
786
        tb_reset_jump(tb1, n1);
787
        tb1->jmp_next[n1] = NULL;
788
        tb1 = tb2;
789
    }
790
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
791

    
792
    tb_phys_invalidate_count++;
793
}
794

    
795
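/* set 'len' consecutive bits starting at bit index 'start' in the bitmap 'tab' */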
static inline void set_bits(uint8_t *tab, int start, int len)
796
{
797
    int end, mask, end1;
798

    
799
    end = start + len;
800
    tab += start >> 3;
801
    mask = 0xff << (start & 7);
802
    if ((start & ~7) == (end & ~7)) {
803
        if (start < end) {
804
            mask &= ~(0xff << (end & 7));
805
            *tab |= mask;
806
        }
807
    } else {
808
        *tab++ |= mask;
809
        start = (start + 8) & ~7;
810
        end1 = end & ~7;
811
        while (start < end1) {
812
            *tab++ = 0xff;
813
            start += 8;
814
        }
815
        if (start < end) {
816
            mask = ~(0xff << (end & 7));
817
            *tab |= mask;
818
        }
819
    }
820
}
821

    
822
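/* build a bitmap marking which bytes of the page are covered by translated
   code; tb_invalidate_phys_page_fast() uses it to decide whether a write
   actually touches translated code */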
static void build_page_bitmap(PageDesc *p)
823
{
824
    int n, tb_start, tb_end;
825
    TranslationBlock *tb;
826

    
827
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
828
    if (!p->code_bitmap)
829
        return;
830

    
831
    tb = p->first_tb;
832
    while (tb != NULL) {
833
        n = (long)tb & 3;
834
        tb = (TranslationBlock *)((long)tb & ~3);
835
        /* NOTE: this is subtle as a TB may span two physical pages */
836
        if (n == 0) {
837
            /* NOTE: tb_end may be after the end of the page, but
838
               it is not a problem */
839
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
840
            tb_end = tb_start + tb->size;
841
            if (tb_end > TARGET_PAGE_SIZE)
842
                tb_end = TARGET_PAGE_SIZE;
843
        } else {
844
            tb_start = 0;
845
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
846
        }
847
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
848
        tb = tb->page_next[n];
849
    }
850
}
851

    
852
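/* translate a new TB for (pc, cs_base, flags, cflags); if no TB can be
   allocated, flush the whole translation cache and retry, then link the
   new TB into the physical page tables */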
TranslationBlock *tb_gen_code(CPUState *env,
853
                              target_ulong pc, target_ulong cs_base,
854
                              int flags, int cflags)
855
{
856
    TranslationBlock *tb;
857
    uint8_t *tc_ptr;
858
    target_ulong phys_pc, phys_page2, virt_page2;
859
    int code_gen_size;
860

    
861
    phys_pc = get_phys_addr_code(env, pc);
862
    tb = tb_alloc(pc);
863
    if (!tb) {
864
        /* flush must be done */
865
        tb_flush(env);
866
        /* cannot fail at this point */
867
        tb = tb_alloc(pc);
868
        /* Don't forget to invalidate previous TB info.  */
869
        tb_invalidated_flag = 1;
870
    }
871
    tc_ptr = code_gen_ptr;
872
    tb->tc_ptr = tc_ptr;
873
    tb->cs_base = cs_base;
874
    tb->flags = flags;
875
    tb->cflags = cflags;
876
    cpu_gen_code(env, tb, &code_gen_size);
877
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
878

    
879
    /* check next page if needed */
880
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
881
    phys_page2 = -1;
882
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
883
        phys_page2 = get_phys_addr_code(env, virt_page2);
884
    }
885
    tb_link_phys(tb, phys_pc, phys_page2);
886
    return tb;
887
}
888

    
889
/* invalidate all TBs which intersect with the target physical page
890
   starting in range [start;end[. NOTE: start and end must refer to
891
   the same physical page. 'is_cpu_write_access' should be true if called
892
   from a real cpu write access: the virtual CPU will exit the current
893
   TB if code is modified inside this TB. */
894
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
895
                                   int is_cpu_write_access)
896
{
897
    TranslationBlock *tb, *tb_next, *saved_tb;
898
    CPUState *env = cpu_single_env;
899
    target_ulong tb_start, tb_end;
900
    PageDesc *p;
901
    int n;
902
#ifdef TARGET_HAS_PRECISE_SMC
903
    int current_tb_not_found = is_cpu_write_access;
904
    TranslationBlock *current_tb = NULL;
905
    int current_tb_modified = 0;
906
    target_ulong current_pc = 0;
907
    target_ulong current_cs_base = 0;
908
    int current_flags = 0;
909
#endif /* TARGET_HAS_PRECISE_SMC */
910

    
911
    p = page_find(start >> TARGET_PAGE_BITS);
912
    if (!p)
913
        return;
914
    if (!p->code_bitmap &&
915
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
916
        is_cpu_write_access) {
917
        /* build code bitmap */
918
        build_page_bitmap(p);
919
    }
920

    
921
    /* we remove all the TBs in the range [start, end[ */
922
    /* XXX: see if in some cases it could be faster to invalidate all the code */
923
    tb = p->first_tb;
924
    while (tb != NULL) {
925
        n = (long)tb & 3;
926
        tb = (TranslationBlock *)((long)tb & ~3);
927
        tb_next = tb->page_next[n];
928
        /* NOTE: this is subtle as a TB may span two physical pages */
929
        if (n == 0) {
930
            /* NOTE: tb_end may be after the end of the page, but
931
               it is not a problem */
932
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
933
            tb_end = tb_start + tb->size;
934
        } else {
935
            tb_start = tb->page_addr[1];
936
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
937
        }
938
        if (!(tb_end <= start || tb_start >= end)) {
939
#ifdef TARGET_HAS_PRECISE_SMC
940
            if (current_tb_not_found) {
941
                current_tb_not_found = 0;
942
                current_tb = NULL;
943
                if (env->mem_io_pc) {
944
                    /* now we have a real cpu fault */
945
                    current_tb = tb_find_pc(env->mem_io_pc);
946
                }
947
            }
948
            if (current_tb == tb &&
949
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
950
                /* If we are modifying the current TB, we must stop
951
                its execution. We could be more precise by checking
952
                that the modification is after the current PC, but it
953
                would require a specialized function to partially
954
                restore the CPU state */
955

    
956
                current_tb_modified = 1;
957
                cpu_restore_state(current_tb, env,
958
                                  env->mem_io_pc, NULL);
959
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
960
                                     &current_flags);
961
            }
962
#endif /* TARGET_HAS_PRECISE_SMC */
963
            /* we need to do that to handle the case where a signal
964
               occurs while doing tb_phys_invalidate() */
965
            saved_tb = NULL;
966
            if (env) {
967
                saved_tb = env->current_tb;
968
                env->current_tb = NULL;
969
            }
970
            tb_phys_invalidate(tb, -1);
971
            if (env) {
972
                env->current_tb = saved_tb;
973
                if (env->interrupt_request && env->current_tb)
974
                    cpu_interrupt(env, env->interrupt_request);
975
            }
976
        }
977
        tb = tb_next;
978
    }
979
#if !defined(CONFIG_USER_ONLY)
980
    /* if no code remaining, no need to continue to use slow writes */
981
    if (!p->first_tb) {
982
        invalidate_page_bitmap(p);
983
        if (is_cpu_write_access) {
984
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
985
        }
986
    }
987
#endif
988
#ifdef TARGET_HAS_PRECISE_SMC
989
    if (current_tb_modified) {
990
        /* we generate a block containing just the instruction
991
           modifying the memory. It will ensure that it cannot modify
992
           itself */
993
        env->current_tb = NULL;
994
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
995
        cpu_resume_from_signal(env, NULL);
996
    }
997
#endif
998
}
999

    
1000
/* len must be <= 8 and start must be a multiple of len */
1001
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1002
{
1003
    PageDesc *p;
1004
    int offset, b;
1005
#if 0
1006
    if (1) {
1007
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1008
                  cpu_single_env->mem_io_vaddr, len,
1009
                  cpu_single_env->eip,
1010
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1011
    }
1012
#endif
1013
    p = page_find(start >> TARGET_PAGE_BITS);
1014
    if (!p)
1015
        return;
1016
    if (p->code_bitmap) {
1017
        offset = start & ~TARGET_PAGE_MASK;
1018
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1019
        if (b & ((1 << len) - 1))
1020
            goto do_invalidate;
1021
    } else {
1022
    do_invalidate:
1023
        tb_invalidate_phys_page_range(start, start + len, 1);
1024
    }
1025
}
1026

    
1027
#if !defined(CONFIG_SOFTMMU)
1028
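/* invalidate every TB on the page containing 'addr'; if the current TB is
   being modified, regenerate it and resume execution from the signal
   handler */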
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1029
                                    unsigned long pc, void *puc)
1030
{
1031
    TranslationBlock *tb;
1032
    PageDesc *p;
1033
    int n;
1034
#ifdef TARGET_HAS_PRECISE_SMC
1035
    TranslationBlock *current_tb = NULL;
1036
    CPUState *env = cpu_single_env;
1037
    int current_tb_modified = 0;
1038
    target_ulong current_pc = 0;
1039
    target_ulong current_cs_base = 0;
1040
    int current_flags = 0;
1041
#endif
1042

    
1043
    addr &= TARGET_PAGE_MASK;
1044
    p = page_find(addr >> TARGET_PAGE_BITS);
1045
    if (!p)
1046
        return;
1047
    tb = p->first_tb;
1048
#ifdef TARGET_HAS_PRECISE_SMC
1049
    if (tb && pc != 0) {
1050
        current_tb = tb_find_pc(pc);
1051
    }
1052
#endif
1053
    while (tb != NULL) {
1054
        n = (long)tb & 3;
1055
        tb = (TranslationBlock *)((long)tb & ~3);
1056
#ifdef TARGET_HAS_PRECISE_SMC
1057
        if (current_tb == tb &&
1058
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1059
                /* If we are modifying the current TB, we must stop
1060
                   its execution. We could be more precise by checking
1061
                   that the modification is after the current PC, but it
1062
                   would require a specialized function to partially
1063
                   restore the CPU state */
1064

    
1065
            current_tb_modified = 1;
1066
            cpu_restore_state(current_tb, env, pc, puc);
1067
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1068
                                 &current_flags);
1069
        }
1070
#endif /* TARGET_HAS_PRECISE_SMC */
1071
        tb_phys_invalidate(tb, addr);
1072
        tb = tb->page_next[n];
1073
    }
1074
    p->first_tb = NULL;
1075
#ifdef TARGET_HAS_PRECISE_SMC
1076
    if (current_tb_modified) {
1077
        /* we generate a block containing just the instruction
1078
           modifying the memory. It will ensure that it cannot modify
1079
           itself */
1080
        env->current_tb = NULL;
1081
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1082
        cpu_resume_from_signal(env, puc);
1083
    }
1084
#endif
1085
}
1086
#endif
1087

    
1088
/* add the tb in the target page and protect it if necessary */
1089
static inline void tb_alloc_page(TranslationBlock *tb,
1090
                                 unsigned int n, target_ulong page_addr)
1091
{
1092
    PageDesc *p;
1093
    TranslationBlock *last_first_tb;
1094

    
1095
    tb->page_addr[n] = page_addr;
1096
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1097
    tb->page_next[n] = p->first_tb;
1098
    last_first_tb = p->first_tb;
1099
    p->first_tb = (TranslationBlock *)((long)tb | n);
1100
    invalidate_page_bitmap(p);
1101

    
1102
#if defined(TARGET_HAS_SMC) || 1
1103

    
1104
#if defined(CONFIG_USER_ONLY)
1105
    if (p->flags & PAGE_WRITE) {
1106
        target_ulong addr;
1107
        PageDesc *p2;
1108
        int prot;
1109

    
1110
        /* force the host page as non writable (writes will have a
1111
           page fault + mprotect overhead) */
1112
        page_addr &= qemu_host_page_mask;
1113
        prot = 0;
1114
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1115
            addr += TARGET_PAGE_SIZE) {
1116

    
1117
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1118
            if (!p2)
1119
                continue;
1120
            prot |= p2->flags;
1121
            p2->flags &= ~PAGE_WRITE;
1122
            page_get_flags(addr);
1123
          }
1124
        mprotect(g2h(page_addr), qemu_host_page_size,
1125
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1126
#ifdef DEBUG_TB_INVALIDATE
1127
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1128
               page_addr);
1129
#endif
1130
    }
1131
#else
1132
    /* if some code is already present, then the pages are already
1133
       protected. So we handle the case where only the first TB is
1134
       allocated in a physical page */
1135
    if (!last_first_tb) {
1136
        tlb_protect_code(page_addr);
1137
    }
1138
#endif
1139

    
1140
#endif /* TARGET_HAS_SMC */
1141
}
1142

    
1143
/* Allocate a new translation block. Flush the translation buffer if
1144
   too many translation blocks or too much generated code. */
1145
TranslationBlock *tb_alloc(target_ulong pc)
1146
{
1147
    TranslationBlock *tb;
1148

    
1149
    if (nb_tbs >= code_gen_max_blocks ||
1150
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1151
        return NULL;
1152
    tb = &tbs[nb_tbs++];
1153
    tb->pc = pc;
1154
    tb->cflags = 0;
1155
    return tb;
1156
}
1157

    
1158
void tb_free(TranslationBlock *tb)
1159
{
1160
    /* In practice this is mostly used for single use temporary TB
1161
       Ignore the hard cases and just back up if this TB happens to
1162
       be the last one generated.  */
1163
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1164
        code_gen_ptr = tb->tc_ptr;
1165
        nb_tbs--;
1166
    }
1167
}
1168

    
1169
/* add a new TB and link it to the physical page tables. phys_page2 is
1170
   (-1) to indicate that only one page contains the TB. */
1171
void tb_link_phys(TranslationBlock *tb,
1172
                  target_ulong phys_pc, target_ulong phys_page2)
1173
{
1174
    unsigned int h;
1175
    TranslationBlock **ptb;
1176

    
1177
    /* Grab the mmap lock to stop another thread invalidating this TB
1178
       before we are done.  */
1179
    mmap_lock();
1180
    /* add in the physical hash table */
1181
    h = tb_phys_hash_func(phys_pc);
1182
    ptb = &tb_phys_hash[h];
1183
    tb->phys_hash_next = *ptb;
1184
    *ptb = tb;
1185

    
1186
    /* add in the page list */
1187
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1188
    if (phys_page2 != -1)
1189
        tb_alloc_page(tb, 1, phys_page2);
1190
    else
1191
        tb->page_addr[1] = -1;
1192

    
1193
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1194
    tb->jmp_next[0] = NULL;
1195
    tb->jmp_next[1] = NULL;
1196

    
1197
    /* init original jump addresses */
1198
    if (tb->tb_next_offset[0] != 0xffff)
1199
        tb_reset_jump(tb, 0);
1200
    if (tb->tb_next_offset[1] != 0xffff)
1201
        tb_reset_jump(tb, 1);
1202

    
1203
#ifdef DEBUG_TB_CHECK
1204
    tb_page_check();
1205
#endif
1206
    mmap_unlock();
1207
}
1208

    
1209
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1210
   tb[1].tc_ptr. Return NULL if not found */
1211
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1212
{
1213
    int m_min, m_max, m;
1214
    unsigned long v;
1215
    TranslationBlock *tb;
1216

    
1217
    if (nb_tbs <= 0)
1218
        return NULL;
1219
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1220
        tc_ptr >= (unsigned long)code_gen_ptr)
1221
        return NULL;
1222
    /* binary search (cf Knuth) */
1223
    m_min = 0;
1224
    m_max = nb_tbs - 1;
1225
    while (m_min <= m_max) {
1226
        m = (m_min + m_max) >> 1;
1227
        tb = &tbs[m];
1228
        v = (unsigned long)tb->tc_ptr;
1229
        if (v == tc_ptr)
1230
            return tb;
1231
        else if (tc_ptr < v) {
1232
            m_max = m - 1;
1233
        } else {
1234
            m_min = m + 1;
1235
        }
1236
    }
1237
    return &tbs[m_max];
1238
}
1239

    
1240
static void tb_reset_jump_recursive(TranslationBlock *tb);
1241

    
1242
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1243
{
1244
    TranslationBlock *tb1, *tb_next, **ptb;
1245
    unsigned int n1;
1246

    
1247
    tb1 = tb->jmp_next[n];
1248
    if (tb1 != NULL) {
1249
        /* find head of list */
1250
        for(;;) {
1251
            n1 = (long)tb1 & 3;
1252
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1253
            if (n1 == 2)
1254
                break;
1255
            tb1 = tb1->jmp_next[n1];
1256
        }
1257
        /* we are now sure now that tb jumps to tb1 */
1258
        tb_next = tb1;
1259

    
1260
        /* remove tb from the jmp_first list */
1261
        ptb = &tb_next->jmp_first;
1262
        for(;;) {
1263
            tb1 = *ptb;
1264
            n1 = (long)tb1 & 3;
1265
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1266
            if (n1 == n && tb1 == tb)
1267
                break;
1268
            ptb = &tb1->jmp_next[n1];
1269
        }
1270
        *ptb = tb->jmp_next[n];
1271
        tb->jmp_next[n] = NULL;
1272

    
1273
        /* suppress the jump to next tb in generated code */
1274
        tb_reset_jump(tb, n);
1275

    
1276
        /* suppress jumps in the tb on which we could have jumped */
1277
        tb_reset_jump_recursive(tb_next);
1278
    }
1279
}
1280

    
1281
static void tb_reset_jump_recursive(TranslationBlock *tb)
1282
{
1283
    tb_reset_jump_recursive2(tb, 0);
1284
    tb_reset_jump_recursive2(tb, 1);
1285
}
1286

    
1287
#if defined(TARGET_HAS_ICE)
1288
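/* invalidate any TB containing the physical address that 'pc' maps to, so
   that breakpoint insertion/removal forces retranslation */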
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1289
{
1290
    target_phys_addr_t addr;
1291
    target_ulong pd;
1292
    ram_addr_t ram_addr;
1293
    PhysPageDesc *p;
1294

    
1295
    addr = cpu_get_phys_page_debug(env, pc);
1296
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1297
    if (!p) {
1298
        pd = IO_MEM_UNASSIGNED;
1299
    } else {
1300
        pd = p->phys_offset;
1301
    }
1302
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1303
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1304
}
1305
#endif
1306

    
1307
/* Add a watchpoint.  */
1308
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1309
                          int flags, CPUWatchpoint **watchpoint)
1310
{
1311
    target_ulong len_mask = ~(len - 1);
1312
    CPUWatchpoint *wp;
1313

    
1314
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1315
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1316
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1317
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1318
        return -EINVAL;
1319
    }
1320
    wp = qemu_malloc(sizeof(*wp));
1321
    if (!wp)
1322
        return -ENOMEM;
1323

    
1324
    wp->vaddr = addr;
1325
    wp->len_mask = len_mask;
1326
    wp->flags = flags;
1327

    
1328
    /* keep all GDB-injected watchpoints in front */
1329
    if (flags & BP_GDB)
1330
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1331
    else
1332
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1333

    
1334
    tlb_flush_page(env, addr);
1335

    
1336
    if (watchpoint)
1337
        *watchpoint = wp;
1338
    return 0;
1339
}
1340

    
1341
/* Remove a specific watchpoint.  */
1342
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1343
                          int flags)
1344
{
1345
    target_ulong len_mask = ~(len - 1);
1346
    CPUWatchpoint *wp;
1347

    
1348
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1349
        if (addr == wp->vaddr && len_mask == wp->len_mask
1350
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1351
            cpu_watchpoint_remove_by_ref(env, wp);
1352
            return 0;
1353
        }
1354
    }
1355
    return -ENOENT;
1356
}
1357

    
1358
/* Remove a specific watchpoint by reference.  */
1359
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1360
{
1361
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1362

    
1363
    tlb_flush_page(env, watchpoint->vaddr);
1364

    
1365
    qemu_free(watchpoint);
1366
}
1367

    
1368
/* Remove all matching watchpoints.  */
1369
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1370
{
1371
    CPUWatchpoint *wp, *next;
1372

    
1373
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1374
        if (wp->flags & mask)
1375
            cpu_watchpoint_remove_by_ref(env, wp);
1376
    }
1377
}
1378

    
1379
/* Add a breakpoint.  */
1380
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1381
                          CPUBreakpoint **breakpoint)
1382
{
1383
#if defined(TARGET_HAS_ICE)
1384
    CPUBreakpoint *bp;
1385

    
1386
    bp = qemu_malloc(sizeof(*bp));
1387
    if (!bp)
1388
        return -ENOMEM;
1389

    
1390
    bp->pc = pc;
1391
    bp->flags = flags;
1392

    
1393
    /* keep all GDB-injected breakpoints in front */
1394
    if (flags & BP_GDB)
1395
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1396
    else
1397
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1398

    
1399
    breakpoint_invalidate(env, pc);
1400

    
1401
    if (breakpoint)
1402
        *breakpoint = bp;
1403
    return 0;
1404
#else
1405
    return -ENOSYS;
1406
#endif
1407
}
1408

    
1409
/* Remove a specific breakpoint.  */
1410
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1411
{
1412
#if defined(TARGET_HAS_ICE)
1413
    CPUBreakpoint *bp;
1414

    
1415
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1416
        if (bp->pc == pc && bp->flags == flags) {
1417
            cpu_breakpoint_remove_by_ref(env, bp);
1418
            return 0;
1419
        }
1420
    }
1421
    return -ENOENT;
1422
#else
1423
    return -ENOSYS;
1424
#endif
1425
}
1426

    
1427
/* Remove a specific breakpoint by reference.  */
1428
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1429
{
1430
#if defined(TARGET_HAS_ICE)
1431
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1432

    
1433
    breakpoint_invalidate(env, breakpoint->pc);
1434

    
1435
    qemu_free(breakpoint);
1436
#endif
1437
}
1438

    
1439
/* Remove all matching breakpoints. */
1440
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1441
{
1442
#if defined(TARGET_HAS_ICE)
1443
    CPUBreakpoint *bp, *next;
1444

    
1445
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1446
        if (bp->flags & mask)
1447
            cpu_breakpoint_remove_by_ref(env, bp);
1448
    }
1449
#endif
1450
}
1451

    
1452
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1453
   CPU loop after each instruction */
1454
void cpu_single_step(CPUState *env, int enabled)
1455
{
1456
#if defined(TARGET_HAS_ICE)
1457
    if (env->singlestep_enabled != enabled) {
1458
        env->singlestep_enabled = enabled;
1459
        /* must flush all the translated code to avoid inconsistencies */
1460
        /* XXX: only flush what is necessary */
1461
        tb_flush(env);
1462
    }
1463
#endif
1464
}
1465

    
1466
/* enable or disable low-level logging */
1467
void cpu_set_log(int log_flags)
1468
{
1469
    loglevel = log_flags;
1470
    if (loglevel && !logfile) {
1471
        logfile = fopen(logfilename, log_append ? "a" : "w");
1472
        if (!logfile) {
1473
            perror(logfilename);
1474
            _exit(1);
1475
        }
1476
#if !defined(CONFIG_SOFTMMU)
1477
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1478
        {
1479
            static char logfile_buf[4096];
1480
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1481
        }
1482
#else
1483
        setvbuf(logfile, NULL, _IOLBF, 0);
1484
#endif
1485
        log_append = 1;
1486
    }
1487
    if (!loglevel && logfile) {
1488
        fclose(logfile);
1489
        logfile = NULL;
1490
    }
1491
}
1492

    
1493
void cpu_set_log_filename(const char *filename)
1494
{
1495
    logfilename = strdup(filename);
1496
    if (logfile) {
1497
        fclose(logfile);
1498
        logfile = NULL;
1499
    }
1500
    cpu_set_log(loglevel);
1501
}
1502

    
1503
/* mask must never be zero, except for A20 change call */
1504
void cpu_interrupt(CPUState *env, int mask)
1505
{
1506
#if !defined(USE_NPTL)
1507
    TranslationBlock *tb;
1508
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1509
#endif
1510
    int old_mask;
1511

    
1512
    old_mask = env->interrupt_request;
1513
    /* FIXME: This is probably not threadsafe.  A different thread could
1514
       be in the middle of a read-modify-write operation.  */
1515
    env->interrupt_request |= mask;
1516
#if defined(USE_NPTL)
1517
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1518
       problem and hope the cpu will stop of its own accord.  For userspace
1519
       emulation this often isn't actually as bad as it sounds.  Often
1520
       signals are used primarily to interrupt blocking syscalls.  */
1521
#else
1522
    if (use_icount) {
1523
        env->icount_decr.u16.high = 0xffff;
1524
#ifndef CONFIG_USER_ONLY
1525
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1526
           an async event happened and we need to process it.  */
1527
        if (!can_do_io(env)
1528
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1529
            cpu_abort(env, "Raised interrupt while not in I/O function");
1530
        }
1531
#endif
1532
    } else {
1533
        tb = env->current_tb;
1534
        /* if the cpu is currently executing code, we must unlink it and
1535
           all the potentially executing TB */
1536
        if (tb && !testandset(&interrupt_lock)) {
1537
            env->current_tb = NULL;
1538
            tb_reset_jump_recursive(tb);
1539
            resetlock(&interrupt_lock);
1540
        }
1541
    }
1542
#endif
1543
}
1544

    
1545
void cpu_reset_interrupt(CPUState *env, int mask)
1546
{
1547
    env->interrupt_request &= ~mask;
1548
}
1549

    
1550
const CPULogItem cpu_log_items[] = {
1551
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1552
      "show generated host assembly code for each compiled TB" },
1553
    { CPU_LOG_TB_IN_ASM, "in_asm",
1554
      "show target assembly code for each compiled TB" },
1555
    { CPU_LOG_TB_OP, "op",
1556
      "show micro ops for each compiled TB" },
1557
    { CPU_LOG_TB_OP_OPT, "op_opt",
1558
      "show micro ops "
1559
#ifdef TARGET_I386
1560
      "before eflags optimization and "
1561
#endif
1562
      "after liveness analysis" },
1563
    { CPU_LOG_INT, "int",
1564
      "show interrupts/exceptions in short format" },
1565
    { CPU_LOG_EXEC, "exec",
1566
      "show trace before each executed TB (lots of logs)" },
1567
    { CPU_LOG_TB_CPU, "cpu",
1568
      "show CPU state before block translation" },
1569
#ifdef TARGET_I386
1570
    { CPU_LOG_PCALL, "pcall",
1571
      "show protected mode far calls/returns/exceptions" },
1572
#endif
1573
#ifdef DEBUG_IOPORT
1574
    { CPU_LOG_IOPORT, "ioport",
1575
      "show all i/o ports accesses" },
1576
#endif
1577
    { 0, NULL, NULL },
1578
};
1579

    
1580
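/* return non-zero if 's2' has length 'n' and matches the first 'n'
   characters of 's1' */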
static int cmp1(const char *s1, int n, const char *s2)
1581
{
1582
    if (strlen(s2) != n)
1583
        return 0;
1584
    return memcmp(s1, s2, n) == 0;
1585
}
1586

    
1587
/* takes a comma separated list of log masks. Return 0 if error. */
1588
int cpu_str_to_log_mask(const char *str)
1589
{
1590
    const CPULogItem *item;
1591
    int mask;
1592
    const char *p, *p1;
1593

    
1594
    p = str;
1595
    mask = 0;
1596
    for(;;) {
1597
        p1 = strchr(p, ',');
1598
        if (!p1)
1599
            p1 = p + strlen(p);
1600
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
1611
    found:
1612
        mask |= item->mask;
1613
        if (*p1 != ',')
1614
            break;
1615
        p = p1 + 1;
1616
    }
1617
    return mask;
1618
}
1619

    
1620
void cpu_abort(CPUState *env, const char *fmt, ...)
1621
{
1622
    va_list ap;
1623
    va_list ap2;
1624

    
1625
    va_start(ap, fmt);
1626
    va_copy(ap2, ap);
1627
    fprintf(stderr, "qemu: fatal: ");
1628
    vfprintf(stderr, fmt, ap);
1629
    fprintf(stderr, "\n");
1630
#ifdef TARGET_I386
1631
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1632
#else
1633
    cpu_dump_state(env, stderr, fprintf, 0);
1634
#endif
1635
    if (qemu_log_enabled()) {
1636
        qemu_log("qemu: fatal: ");
1637
        qemu_log_vprintf(fmt, ap2);
1638
        qemu_log("\n");
1639
#ifdef TARGET_I386
1640
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1641
#else
1642
        log_cpu_state(env, 0);
1643
#endif
1644
        qemu_log_flush();
1645
        qemu_log_close();
1646
    }
1647
    va_end(ap2);
1648
    va_end(ap);
1649
    abort();
1650
}
1651

    
1652
CPUState *cpu_copy(CPUState *env)
1653
{
1654
    CPUState *new_env = cpu_init(env->cpu_model_str);
1655
    CPUState *next_cpu = new_env->next_cpu;
1656
    int cpu_index = new_env->cpu_index;
1657
#if defined(TARGET_HAS_ICE)
1658
    CPUBreakpoint *bp;
1659
    CPUWatchpoint *wp;
1660
#endif
1661

    
1662
    memcpy(new_env, env, sizeof(CPUState));
1663

    
1664
    /* Preserve chaining and index. */
1665
    new_env->next_cpu = next_cpu;
1666
    new_env->cpu_index = cpu_index;
1667

    
1668
    /* Clone all break/watchpoints.
1669
       Note: Once we support ptrace with hw-debug register access, make sure
1670
       BP_CPU break/watchpoints are handled correctly on clone. */
1671
    TAILQ_INIT(&env->breakpoints);
1672
    TAILQ_INIT(&env->watchpoints);
1673
#if defined(TARGET_HAS_ICE)
1674
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1675
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1676
    }
1677
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1678
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1679
                              wp->flags, NULL);
1680
    }
1681
#endif
1682

    
1683
    return new_env;
1684
}
1685

    
1686
#if !defined(CONFIG_USER_ONLY)
1687

    
1688
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1689
{
1690
    unsigned int i;
1691

    
1692
    /* Discard jump cache entries for any tb which might potentially
1693
       overlap the flushed page.  */
1694
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1695
    memset (&env->tb_jmp_cache[i], 0, 
1696
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1697

    
1698
    i = tb_jmp_cache_hash_page(addr);
1699
    memset (&env->tb_jmp_cache[i], 0, 
1700
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1701
}
1702

    
1703
/* NOTE: if flush_global is true, also flush global entries (not
1704
   implemented yet) */
1705
void tlb_flush(CPUState *env, int flush_global)
1706
{
1707
    int i;
1708

    
1709
#if defined(DEBUG_TLB)
1710
    printf("tlb_flush:\n");
1711
#endif
1712
    /* must reset current TB so that interrupts cannot modify the
1713
       links while we are modifying them */
1714
    env->current_tb = NULL;
1715

    
1716
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1717
        env->tlb_table[0][i].addr_read = -1;
1718
        env->tlb_table[0][i].addr_write = -1;
1719
        env->tlb_table[0][i].addr_code = -1;
1720
        env->tlb_table[1][i].addr_read = -1;
1721
        env->tlb_table[1][i].addr_write = -1;
1722
        env->tlb_table[1][i].addr_code = -1;
1723
#if (NB_MMU_MODES >= 3)
1724
        env->tlb_table[2][i].addr_read = -1;
1725
        env->tlb_table[2][i].addr_write = -1;
1726
        env->tlb_table[2][i].addr_code = -1;
1727
#if (NB_MMU_MODES == 4)
1728
        env->tlb_table[3][i].addr_read = -1;
1729
        env->tlb_table[3][i].addr_write = -1;
1730
        env->tlb_table[3][i].addr_code = -1;
1731
#endif
1732
#endif
1733
    }
1734

    
1735
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1736

    
1737
#ifdef USE_KQEMU
1738
    if (env->kqemu_enabled) {
1739
        kqemu_flush(env, flush_global);
1740
    }
1741
#endif
1742
    tlb_flush_count++;
1743
}
1744

    
1745
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1746
{
1747
    if (addr == (tlb_entry->addr_read &
1748
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1749
        addr == (tlb_entry->addr_write &
1750
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1751
        addr == (tlb_entry->addr_code &
1752
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1753
        tlb_entry->addr_read = -1;
1754
        tlb_entry->addr_write = -1;
1755
        tlb_entry->addr_code = -1;
1756
    }
1757
}
1758

    
1759
void tlb_flush_page(CPUState *env, target_ulong addr)
1760
{
1761
    int i;
1762

    
1763
#if defined(DEBUG_TLB)
1764
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1765
#endif
1766
    /* must reset current TB so that interrupts cannot modify the
1767
       links while we are modifying them */
1768
    env->current_tb = NULL;
1769

    
1770
    addr &= TARGET_PAGE_MASK;
1771
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1772
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1773
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1774
#if (NB_MMU_MODES >= 3)
1775
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1776
#if (NB_MMU_MODES == 4)
1777
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1778
#endif
1779
#endif
1780

    
1781
    tlb_flush_jmp_cache(env, addr);
1782

    
1783
#ifdef USE_KQEMU
1784
    if (env->kqemu_enabled) {
1785
        kqemu_flush_page(env, addr);
1786
    }
1787
#endif
1788
}
1789

    
1790
/* update the TLBs so that writes to code in the virtual page 'addr'
1791
   can be detected */
1792
static void tlb_protect_code(ram_addr_t ram_addr)
1793
{
1794
    cpu_physical_memory_reset_dirty(ram_addr,
1795
                                    ram_addr + TARGET_PAGE_SIZE,
1796
                                    CODE_DIRTY_FLAG);
1797
}
1798

    
1799
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1800
   tested for self modifying code */
1801
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1802
                                    target_ulong vaddr)
1803
{
1804
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1805
}
1806

    
1807
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1808
                                         unsigned long start, unsigned long length)
1809
{
1810
    unsigned long addr;
1811
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1812
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1813
        if ((addr - start) < length) {
1814
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1815
        }
1816
    }
1817
}
1818

    
1819
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1820
                                     int dirty_flags)
1821
{
1822
    CPUState *env;
1823
    unsigned long length, start1;
1824
    int i, mask, len;
1825
    uint8_t *p;
1826

    
1827
    start &= TARGET_PAGE_MASK;
1828
    end = TARGET_PAGE_ALIGN(end);
1829

    
1830
    length = end - start;
1831
    if (length == 0)
1832
        return;
1833
    len = length >> TARGET_PAGE_BITS;
1834
#ifdef USE_KQEMU
1835
    /* XXX: should not depend on cpu context */
1836
    env = first_cpu;
1837
    if (env->kqemu_enabled) {
1838
        ram_addr_t addr;
1839
        addr = start;
1840
        for(i = 0; i < len; i++) {
1841
            kqemu_set_notdirty(env, addr);
1842
            addr += TARGET_PAGE_SIZE;
1843
        }
1844
    }
1845
#endif
1846
    mask = ~dirty_flags;
1847
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1848
    for(i = 0; i < len; i++)
1849
        p[i] &= mask;
1850

    
1851
    /* we modify the TLB cache so that the dirty bit will be set again
1852
       when accessing the range */
1853
    start1 = start + (unsigned long)phys_ram_base;
1854
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1855
        for(i = 0; i < CPU_TLB_SIZE; i++)
1856
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1857
        for(i = 0; i < CPU_TLB_SIZE; i++)
1858
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1859
#if (NB_MMU_MODES >= 3)
1860
        for(i = 0; i < CPU_TLB_SIZE; i++)
1861
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1862
#if (NB_MMU_MODES == 4)
1863
        for(i = 0; i < CPU_TLB_SIZE; i++)
1864
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1865
#endif
1866
#endif
1867
    }
1868
}
1869

    
1870
int cpu_physical_memory_set_dirty_tracking(int enable)
1871
{
1872
    in_migration = enable;
1873
    return 0;
1874
}
1875

    
1876
int cpu_physical_memory_get_dirty_tracking(void)
1877
{
1878
    return in_migration;
1879
}
1880

    
1881
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1882
{
1883
    if (kvm_enabled())
1884
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1885
}
1886

    
1887
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1888
{
1889
    ram_addr_t ram_addr;
1890

    
1891
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1892
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
1893
            tlb_entry->addend - (unsigned long)phys_ram_base;
1894
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1895
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1896
        }
1897
    }
1898
}
1899

    
1900
/* update the TLB according to the current state of the dirty bits */
1901
void cpu_tlb_update_dirty(CPUState *env)
1902
{
1903
    int i;
1904
    for(i = 0; i < CPU_TLB_SIZE; i++)
1905
        tlb_update_dirty(&env->tlb_table[0][i]);
1906
    for(i = 0; i < CPU_TLB_SIZE; i++)
1907
        tlb_update_dirty(&env->tlb_table[1][i]);
1908
#if (NB_MMU_MODES >= 3)
1909
    for(i = 0; i < CPU_TLB_SIZE; i++)
1910
        tlb_update_dirty(&env->tlb_table[2][i]);
1911
#if (NB_MMU_MODES == 4)
1912
    for(i = 0; i < CPU_TLB_SIZE; i++)
1913
        tlb_update_dirty(&env->tlb_table[3][i]);
1914
#endif
1915
#endif
1916
}
1917

    
1918
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1919
{
1920
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1921
        tlb_entry->addr_write = vaddr;
1922
}
1923

    
1924
/* update the TLB corresponding to virtual page vaddr
1925
   so that it is no longer dirty */
1926
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1927
{
1928
    int i;
1929

    
1930
    vaddr &= TARGET_PAGE_MASK;
1931
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1932
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1933
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1934
#if (NB_MMU_MODES >= 3)
1935
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1936
#if (NB_MMU_MODES == 4)
1937
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1938
#endif
1939
#endif
1940
}
1941

    
1942
/* add a new TLB entry. At most one entry for a given virtual address
1943
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1944
   (can only happen in non SOFTMMU mode for I/O pages or pages
1945
   conflicting with the host address space). */
1946
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1947
                      target_phys_addr_t paddr, int prot,
1948
                      int mmu_idx, int is_softmmu)
1949
{
1950
    PhysPageDesc *p;
1951
    unsigned long pd;
1952
    unsigned int index;
1953
    target_ulong address;
1954
    target_ulong code_address;
1955
    target_phys_addr_t addend;
1956
    int ret;
1957
    CPUTLBEntry *te;
1958
    CPUWatchpoint *wp;
1959
    target_phys_addr_t iotlb;
1960

    
1961
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1962
    if (!p) {
1963
        pd = IO_MEM_UNASSIGNED;
1964
    } else {
1965
        pd = p->phys_offset;
1966
    }
1967
#if defined(DEBUG_TLB)
1968
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1969
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1970
#endif
1971

    
1972
    ret = 0;
1973
    address = vaddr;
1974
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1975
        /* IO memory case (romd handled later) */
1976
        address |= TLB_MMIO;
1977
    }
1978
    addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
1979
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1980
        /* Normal RAM.  */
1981
        iotlb = pd & TARGET_PAGE_MASK;
1982
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1983
            iotlb |= IO_MEM_NOTDIRTY;
1984
        else
1985
            iotlb |= IO_MEM_ROM;
1986
    } else {
1987
        /* IO handlers are currently passed a phsical address.
1988
           It would be nice to pass an offset from the base address
1989
           of that region.  This would avoid having to special case RAM,
1990
           and avoid full address decoding in every device.
1991
           We can't use the high bits of pd for this because
1992
           IO_MEM_ROMD uses these as a ram address.  */
1993
        iotlb = (pd & ~TARGET_PAGE_MASK);
1994
        if (p) {
1995
            iotlb += p->region_offset;
1996
        } else {
1997
            iotlb += paddr;
1998
        }
1999
    }
2000

    
2001
    code_address = address;
2002
    /* Make accesses to pages with watchpoints go via the
2003
       watchpoint trap routines.  */
2004
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2005
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2006
            iotlb = io_mem_watch + paddr;
2007
            /* TODO: The memory case can be optimized by not trapping
2008
               reads of pages with a write breakpoint.  */
2009
            address |= TLB_MMIO;
2010
        }
2011
    }
2012

    
2013
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2014
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2015
    te = &env->tlb_table[mmu_idx][index];
2016
    te->addend = addend - vaddr;
2017
    if (prot & PAGE_READ) {
2018
        te->addr_read = address;
2019
    } else {
2020
        te->addr_read = -1;
2021
    }
2022

    
2023
    if (prot & PAGE_EXEC) {
2024
        te->addr_code = code_address;
2025
    } else {
2026
        te->addr_code = -1;
2027
    }
2028
    if (prot & PAGE_WRITE) {
2029
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2030
            (pd & IO_MEM_ROMD)) {
2031
            /* Write access calls the I/O callback.  */
2032
            te->addr_write = address | TLB_MMIO;
2033
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2034
                   !cpu_physical_memory_is_dirty(pd)) {
2035
            te->addr_write = address | TLB_NOTDIRTY;
2036
        } else {
2037
            te->addr_write = address;
2038
        }
2039
    } else {
2040
        te->addr_write = -1;
2041
    }
2042
    return ret;
2043
}
2044

    
2045
#else
2046

    
2047
void tlb_flush(CPUState *env, int flush_global)
2048
{
2049
}
2050

    
2051
void tlb_flush_page(CPUState *env, target_ulong addr)
2052
{
2053
}
2054

    
2055
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2056
                      target_phys_addr_t paddr, int prot,
2057
                      int mmu_idx, int is_softmmu)
2058
{
2059
    return 0;
2060
}
2061

    
2062
/* dump memory mappings */
2063
void page_dump(FILE *f)
2064
{
2065
    unsigned long start, end;
2066
    int i, j, prot, prot1;
2067
    PageDesc *p;
2068

    
2069
    fprintf(f, "%-8s %-8s %-8s %s\n",
2070
            "start", "end", "size", "prot");
2071
    start = -1;
2072
    end = -1;
2073
    prot = 0;
2074
    for(i = 0; i <= L1_SIZE; i++) {
2075
        if (i < L1_SIZE)
2076
            p = l1_map[i];
2077
        else
2078
            p = NULL;
2079
        for(j = 0;j < L2_SIZE; j++) {
2080
            if (!p)
2081
                prot1 = 0;
2082
            else
2083
                prot1 = p[j].flags;
2084
            if (prot1 != prot) {
2085
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2086
                if (start != -1) {
2087
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2088
                            start, end, end - start,
2089
                            prot & PAGE_READ ? 'r' : '-',
2090
                            prot & PAGE_WRITE ? 'w' : '-',
2091
                            prot & PAGE_EXEC ? 'x' : '-');
2092
                }
2093
                if (prot1 != 0)
2094
                    start = end;
2095
                else
2096
                    start = -1;
2097
                prot = prot1;
2098
            }
2099
            if (!p)
2100
                break;
2101
        }
2102
    }
2103
}
2104

    
2105
int page_get_flags(target_ulong address)
2106
{
2107
    PageDesc *p;
2108

    
2109
    p = page_find(address >> TARGET_PAGE_BITS);
2110
    if (!p)
2111
        return 0;
2112
    return p->flags;
2113
}
2114

    
2115
/* modify the flags of a page and invalidate the code if
2116
   necessary. The flag PAGE_WRITE_ORG is positionned automatically
2117
   depending on PAGE_WRITE */
2118
void page_set_flags(target_ulong start, target_ulong end, int flags)
2119
{
2120
    PageDesc *p;
2121
    target_ulong addr;
2122

    
2123
    /* mmap_lock should already be held.  */
2124
    start = start & TARGET_PAGE_MASK;
2125
    end = TARGET_PAGE_ALIGN(end);
2126
    if (flags & PAGE_WRITE)
2127
        flags |= PAGE_WRITE_ORG;
2128
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2129
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2130
        /* We may be called for host regions that are outside guest
2131
           address space.  */
2132
        if (!p)
2133
            return;
2134
        /* if the write protection is set, then we invalidate the code
2135
           inside */
2136
        if (!(p->flags & PAGE_WRITE) &&
2137
            (flags & PAGE_WRITE) &&
2138
            p->first_tb) {
2139
            tb_invalidate_phys_page(addr, 0, NULL);
2140
        }
2141
        p->flags = flags;
2142
    }
2143
}
2144

    
2145
int page_check_range(target_ulong start, target_ulong len, int flags)
2146
{
2147
    PageDesc *p;
2148
    target_ulong end;
2149
    target_ulong addr;
2150

    
2151
    if (start + len < start)
2152
        /* we've wrapped around */
2153
        return -1;
2154

    
2155
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2156
    start = start & TARGET_PAGE_MASK;
2157

    
2158
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2159
        p = page_find(addr >> TARGET_PAGE_BITS);
2160
        if( !p )
2161
            return -1;
2162
        if( !(p->flags & PAGE_VALID) )
2163
            return -1;
2164

    
2165
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2166
            return -1;
2167
        if (flags & PAGE_WRITE) {
2168
            if (!(p->flags & PAGE_WRITE_ORG))
2169
                return -1;
2170
            /* unprotect the page if it was put read-only because it
2171
               contains translated code */
2172
            if (!(p->flags & PAGE_WRITE)) {
2173
                if (!page_unprotect(addr, 0, NULL))
2174
                    return -1;
2175
            }
2176
            return 0;
2177
        }
2178
    }
2179
    return 0;
2180
}
2181

    
2182
/* called from signal handler: invalidate the code and unprotect the
2183
   page. Return TRUE if the fault was succesfully handled. */
2184
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2185
{
2186
    unsigned int page_index, prot, pindex;
2187
    PageDesc *p, *p1;
2188
    target_ulong host_start, host_end, addr;
2189

    
2190
    /* Technically this isn't safe inside a signal handler.  However we
2191
       know this only ever happens in a synchronous SEGV handler, so in
2192
       practice it seems to be ok.  */
2193
    mmap_lock();
2194

    
2195
    host_start = address & qemu_host_page_mask;
2196
    page_index = host_start >> TARGET_PAGE_BITS;
2197
    p1 = page_find(page_index);
2198
    if (!p1) {
2199
        mmap_unlock();
2200
        return 0;
2201
    }
2202
    host_end = host_start + qemu_host_page_size;
2203
    p = p1;
2204
    prot = 0;
2205
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2206
        prot |= p->flags;
2207
        p++;
2208
    }
2209
    /* if the page was really writable, then we change its
2210
       protection back to writable */
2211
    if (prot & PAGE_WRITE_ORG) {
2212
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2213
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2214
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2215
                     (prot & PAGE_BITS) | PAGE_WRITE);
2216
            p1[pindex].flags |= PAGE_WRITE;
2217
            /* and since the content will be modified, we must invalidate
2218
               the corresponding translated code. */
2219
            tb_invalidate_phys_page(address, pc, puc);
2220
#ifdef DEBUG_TB_CHECK
2221
            tb_invalidate_check(address);
2222
#endif
2223
            mmap_unlock();
2224
            return 1;
2225
        }
2226
    }
2227
    mmap_unlock();
2228
    return 0;
2229
}
2230

    
2231
static inline void tlb_set_dirty(CPUState *env,
2232
                                 unsigned long addr, target_ulong vaddr)
2233
{
2234
}
2235
#endif /* defined(CONFIG_USER_ONLY) */
2236

    
2237
#if !defined(CONFIG_USER_ONLY)
2238

    
2239
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2240
                             ram_addr_t memory, ram_addr_t region_offset);
2241
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2242
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2243
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2244
                      need_subpage)                                     \
2245
    do {                                                                \
2246
        if (addr > start_addr)                                          \
2247
            start_addr2 = 0;                                            \
2248
        else {                                                          \
2249
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2250
            if (start_addr2 > 0)                                        \
2251
                need_subpage = 1;                                       \
2252
        }                                                               \
2253
                                                                        \
2254
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2255
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2256
        else {                                                          \
2257
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2258
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2259
                need_subpage = 1;                                       \
2260
        }                                                               \
2261
    } while (0)
2262

    
2263
/* register physical memory. 'size' must be a multiple of the target
2264
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2265
   io memory page.  The address used when calling the IO function is
2266
   the offset from the start of the region, plus region_offset.  Both
2267
   start_region and regon_offset are rounded down to a page boundary
2268
   before calculating this offset.  This should not be a problem unless
2269
   the low bits of start_addr and region_offset differ.  */
2270
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2271
                                         ram_addr_t size,
2272
                                         ram_addr_t phys_offset,
2273
                                         ram_addr_t region_offset)
2274
{
2275
    target_phys_addr_t addr, end_addr;
2276
    PhysPageDesc *p;
2277
    CPUState *env;
2278
    ram_addr_t orig_size = size;
2279
    void *subpage;
2280

    
2281
#ifdef USE_KQEMU
2282
    /* XXX: should not depend on cpu context */
2283
    env = first_cpu;
2284
    if (env->kqemu_enabled) {
2285
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2286
    }
2287
#endif
2288
    if (kvm_enabled())
2289
        kvm_set_phys_mem(start_addr, size, phys_offset);
2290

    
2291
    region_offset &= TARGET_PAGE_MASK;
2292
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2293
    end_addr = start_addr + (target_phys_addr_t)size;
2294
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2295
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2296
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2297
            ram_addr_t orig_memory = p->phys_offset;
2298
            target_phys_addr_t start_addr2, end_addr2;
2299
            int need_subpage = 0;
2300

    
2301
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2302
                          need_subpage);
2303
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2304
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2305
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2306
                                           &p->phys_offset, orig_memory,
2307
                                           p->region_offset);
2308
                } else {
2309
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2310
                                            >> IO_MEM_SHIFT];
2311
                }
2312
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2313
                                 region_offset);
2314
                p->region_offset = 0;
2315
            } else {
2316
                p->phys_offset = phys_offset;
2317
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2318
                    (phys_offset & IO_MEM_ROMD))
2319
                    phys_offset += TARGET_PAGE_SIZE;
2320
            }
2321
        } else {
2322
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2323
            p->phys_offset = phys_offset;
2324
            p->region_offset = region_offset;
2325
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2326
                (phys_offset & IO_MEM_ROMD)) {
2327
                phys_offset += TARGET_PAGE_SIZE;
2328
            } else {
2329
                target_phys_addr_t start_addr2, end_addr2;
2330
                int need_subpage = 0;
2331

    
2332
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2333
                              end_addr2, need_subpage);
2334

    
2335
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2336
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2337
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2338
                                           0);
2339
                    subpage_register(subpage, start_addr2, end_addr2,
2340
                                     phys_offset, region_offset);
2341
                    p->region_offset = 0;
2342
                }
2343
            }
2344
        }
2345
        region_offset += TARGET_PAGE_SIZE;
2346
    }
2347

    
2348
    /* since each CPU stores ram addresses in its TLB cache, we must
2349
       reset the modified entries */
2350
    /* XXX: slow ! */
2351
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2352
        tlb_flush(env, 1);
2353
    }
2354
}
2355

    
2356
/* XXX: temporary until new memory mapping API */
2357
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2358
{
2359
    PhysPageDesc *p;
2360

    
2361
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2362
    if (!p)
2363
        return IO_MEM_UNASSIGNED;
2364
    return p->phys_offset;
2365
}
2366

    
2367
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2368
{
2369
    if (kvm_enabled())
2370
        kvm_coalesce_mmio_region(addr, size);
2371
}
2372

    
2373
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2374
{
2375
    if (kvm_enabled())
2376
        kvm_uncoalesce_mmio_region(addr, size);
2377
}
2378

    
2379
/* XXX: better than nothing */
2380
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2381
{
2382
    ram_addr_t addr;
2383
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
2384
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2385
                (uint64_t)size, (uint64_t)phys_ram_size);
2386
        abort();
2387
    }
2388
    addr = phys_ram_alloc_offset;
2389
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
2390
    return addr;
2391
}
2392

    
2393
void qemu_ram_free(ram_addr_t addr)
2394
{
2395
}
2396

    
2397
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2398
{
2399
#ifdef DEBUG_UNASSIGNED
2400
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2401
#endif
2402
#if defined(TARGET_SPARC)
2403
    do_unassigned_access(addr, 0, 0, 0, 1);
2404
#endif
2405
    return 0;
2406
}
2407

    
2408
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2409
{
2410
#ifdef DEBUG_UNASSIGNED
2411
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2412
#endif
2413
#if defined(TARGET_SPARC)
2414
    do_unassigned_access(addr, 0, 0, 0, 2);
2415
#endif
2416
    return 0;
2417
}
2418

    
2419
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2420
{
2421
#ifdef DEBUG_UNASSIGNED
2422
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2423
#endif
2424
#if defined(TARGET_SPARC)
2425
    do_unassigned_access(addr, 0, 0, 0, 4);
2426
#endif
2427
    return 0;
2428
}
2429

    
2430
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2431
{
2432
#ifdef DEBUG_UNASSIGNED
2433
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2434
#endif
2435
#if defined(TARGET_SPARC)
2436
    do_unassigned_access(addr, 1, 0, 0, 1);
2437
#endif
2438
}
2439

    
2440
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2441
{
2442
#ifdef DEBUG_UNASSIGNED
2443
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2444
#endif
2445
#if defined(TARGET_SPARC)
2446
    do_unassigned_access(addr, 1, 0, 0, 2);
2447
#endif
2448
}
2449

    
2450
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2451
{
2452
#ifdef DEBUG_UNASSIGNED
2453
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2454
#endif
2455
#if defined(TARGET_SPARC)
2456
    do_unassigned_access(addr, 1, 0, 0, 4);
2457
#endif
2458
}
2459

    
2460
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2461
    unassigned_mem_readb,
2462
    unassigned_mem_readw,
2463
    unassigned_mem_readl,
2464
};
2465

    
2466
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2467
    unassigned_mem_writeb,
2468
    unassigned_mem_writew,
2469
    unassigned_mem_writel,
2470
};
2471

    
2472
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2473
                                uint32_t val)
2474
{
2475
    int dirty_flags;
2476
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2477
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2478
#if !defined(CONFIG_USER_ONLY)
2479
        tb_invalidate_phys_page_fast(ram_addr, 1);
2480
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2481
#endif
2482
    }
2483
    stb_p(phys_ram_base + ram_addr, val);
2484
#ifdef USE_KQEMU
2485
    if (cpu_single_env->kqemu_enabled &&
2486
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2487
        kqemu_modify_page(cpu_single_env, ram_addr);
2488
#endif
2489
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2490
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2491
    /* we remove the notdirty callback only if the code has been
2492
       flushed */
2493
    if (dirty_flags == 0xff)
2494
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2495
}
2496

    
2497
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2498
                                uint32_t val)
2499
{
2500
    int dirty_flags;
2501
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2502
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2503
#if !defined(CONFIG_USER_ONLY)
2504
        tb_invalidate_phys_page_fast(ram_addr, 2);
2505
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2506
#endif
2507
    }
2508
    stw_p(phys_ram_base + ram_addr, val);
2509
#ifdef USE_KQEMU
2510
    if (cpu_single_env->kqemu_enabled &&
2511
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2512
        kqemu_modify_page(cpu_single_env, ram_addr);
2513
#endif
2514
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2515
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2516
    /* we remove the notdirty callback only if the code has been
2517
       flushed */
2518
    if (dirty_flags == 0xff)
2519
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2520
}
2521

    
2522
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2523
                                uint32_t val)
2524
{
2525
    int dirty_flags;
2526
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2527
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2528
#if !defined(CONFIG_USER_ONLY)
2529
        tb_invalidate_phys_page_fast(ram_addr, 4);
2530
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2531
#endif
2532
    }
2533
    stl_p(phys_ram_base + ram_addr, val);
2534
#ifdef USE_KQEMU
2535
    if (cpu_single_env->kqemu_enabled &&
2536
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2537
        kqemu_modify_page(cpu_single_env, ram_addr);
2538
#endif
2539
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2540
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2541
    /* we remove the notdirty callback only if the code has been
2542
       flushed */
2543
    if (dirty_flags == 0xff)
2544
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2545
}
2546

    
2547
static CPUReadMemoryFunc *error_mem_read[3] = {
2548
    NULL, /* never used */
2549
    NULL, /* never used */
2550
    NULL, /* never used */
2551
};
2552

    
2553
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2554
    notdirty_mem_writeb,
2555
    notdirty_mem_writew,
2556
    notdirty_mem_writel,
2557
};
2558

    
2559
/* Generate a debug exception if a watchpoint has been hit.  */
2560
static void check_watchpoint(int offset, int len_mask, int flags)
2561
{
2562
    CPUState *env = cpu_single_env;
2563
    target_ulong pc, cs_base;
2564
    TranslationBlock *tb;
2565
    target_ulong vaddr;
2566
    CPUWatchpoint *wp;
2567
    int cpu_flags;
2568

    
2569
    if (env->watchpoint_hit) {
2570
        /* We re-entered the check after replacing the TB. Now raise
2571
         * the debug interrupt so that is will trigger after the
2572
         * current instruction. */
2573
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2574
        return;
2575
    }
2576
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2577
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2578
        if ((vaddr == (wp->vaddr & len_mask) ||
2579
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2580
            wp->flags |= BP_WATCHPOINT_HIT;
2581
            if (!env->watchpoint_hit) {
2582
                env->watchpoint_hit = wp;
2583
                tb = tb_find_pc(env->mem_io_pc);
2584
                if (!tb) {
2585
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2586
                              "pc=%p", (void *)env->mem_io_pc);
2587
                }
2588
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2589
                tb_phys_invalidate(tb, -1);
2590
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2591
                    env->exception_index = EXCP_DEBUG;
2592
                } else {
2593
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2594
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2595
                }
2596
                cpu_resume_from_signal(env, NULL);
2597
            }
2598
        } else {
2599
            wp->flags &= ~BP_WATCHPOINT_HIT;
2600
        }
2601
    }
2602
}
2603

    
2604
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2605
   so these check for a hit then pass through to the normal out-of-line
2606
   phys routines.  */
2607
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2608
{
2609
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2610
    return ldub_phys(addr);
2611
}
2612

    
2613
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2614
{
2615
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2616
    return lduw_phys(addr);
2617
}
2618

    
2619
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2620
{
2621
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2622
    return ldl_phys(addr);
2623
}
2624

    
2625
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2626
                             uint32_t val)
2627
{
2628
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2629
    stb_phys(addr, val);
2630
}
2631

    
2632
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2633
                             uint32_t val)
2634
{
2635
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2636
    stw_phys(addr, val);
2637
}
2638

    
2639
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2640
                             uint32_t val)
2641
{
2642
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2643
    stl_phys(addr, val);
2644
}
2645

    
2646
static CPUReadMemoryFunc *watch_mem_read[3] = {
2647
    watch_mem_readb,
2648
    watch_mem_readw,
2649
    watch_mem_readl,
2650
};
2651

    
2652
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2653
    watch_mem_writeb,
2654
    watch_mem_writew,
2655
    watch_mem_writel,
2656
};
2657

    
2658
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2659
                                 unsigned int len)
2660
{
2661
    uint32_t ret;
2662
    unsigned int idx;
2663

    
2664
    idx = SUBPAGE_IDX(addr);
2665
#if defined(DEBUG_SUBPAGE)
2666
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2667
           mmio, len, addr, idx);
2668
#endif
2669
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2670
                                       addr + mmio->region_offset[idx][0][len]);
2671

    
2672
    return ret;
2673
}
2674

    
2675
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2676
                              uint32_t value, unsigned int len)
2677
{
2678
    unsigned int idx;
2679

    
2680
    idx = SUBPAGE_IDX(addr);
2681
#if defined(DEBUG_SUBPAGE)
2682
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2683
           mmio, len, addr, idx, value);
2684
#endif
2685
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2686
                                  addr + mmio->region_offset[idx][1][len],
2687
                                  value);
2688
}
2689

    
2690
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2691
{
2692
#if defined(DEBUG_SUBPAGE)
2693
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2694
#endif
2695

    
2696
    return subpage_readlen(opaque, addr, 0);
2697
}
2698

    
2699
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2700
                            uint32_t value)
2701
{
2702
#if defined(DEBUG_SUBPAGE)
2703
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2704
#endif
2705
    subpage_writelen(opaque, addr, value, 0);
2706
}
2707

    
2708
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2709
{
2710
#if defined(DEBUG_SUBPAGE)
2711
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2712
#endif
2713

    
2714
    return subpage_readlen(opaque, addr, 1);
2715
}
2716

    
2717
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2718
                            uint32_t value)
2719
{
2720
#if defined(DEBUG_SUBPAGE)
2721
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2722
#endif
2723
    subpage_writelen(opaque, addr, value, 1);
2724
}
2725

    
2726
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2727
{
2728
#if defined(DEBUG_SUBPAGE)
2729
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2730
#endif
2731

    
2732
    return subpage_readlen(opaque, addr, 2);
2733
}
2734

    
2735
static void subpage_writel (void *opaque,
2736
                         target_phys_addr_t addr, uint32_t value)
2737
{
2738
#if defined(DEBUG_SUBPAGE)
2739
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2740
#endif
2741
    subpage_writelen(opaque, addr, value, 2);
2742
}
2743

    
2744
static CPUReadMemoryFunc *subpage_read[] = {
2745
    &subpage_readb,
2746
    &subpage_readw,
2747
    &subpage_readl,
2748
};
2749

    
2750
static CPUWriteMemoryFunc *subpage_write[] = {
2751
    &subpage_writeb,
2752
    &subpage_writew,
2753
    &subpage_writel,
2754
};
2755

    
2756
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2757
                             ram_addr_t memory, ram_addr_t region_offset)
2758
{
2759
    int idx, eidx;
2760
    unsigned int i;
2761

    
2762
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2763
        return -1;
2764
    idx = SUBPAGE_IDX(start);
2765
    eidx = SUBPAGE_IDX(end);
2766
#if defined(DEBUG_SUBPAGE)
2767
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2768
           mmio, start, end, idx, eidx, memory);
2769
#endif
2770
    memory >>= IO_MEM_SHIFT;
2771
    for (; idx <= eidx; idx++) {
2772
        for (i = 0; i < 4; i++) {
2773
            if (io_mem_read[memory][i]) {
2774
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2775
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2776
                mmio->region_offset[idx][0][i] = region_offset;
2777
            }
2778
            if (io_mem_write[memory][i]) {
2779
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2780
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2781
                mmio->region_offset[idx][1][i] = region_offset;
2782
            }
2783
        }
2784
    }
2785

    
2786
    return 0;
2787
}
2788

    
2789
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2790
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2791
{
2792
    subpage_t *mmio;
2793
    int subpage_memory;
2794

    
2795
    mmio = qemu_mallocz(sizeof(subpage_t));
2796
    if (mmio != NULL) {
2797
        mmio->base = base;
2798
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2799
#if defined(DEBUG_SUBPAGE)
2800
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2801
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2802
#endif
2803
        *phys = subpage_memory | IO_MEM_SUBPAGE;
2804
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2805
                         region_offset);
2806
    }
2807

    
2808
    return mmio;
2809
}
2810

    
2811
static void io_mem_init(void)
2812
{
2813
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2814
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2815
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2816
    io_mem_nb = 5;
2817

    
2818
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2819
                                          watch_mem_write, NULL);
2820
    /* alloc dirty bits array */
2821
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2822
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2823
}
2824

    
2825
/* mem_read and mem_write are arrays of functions containing the
2826
   function to access byte (index 0), word (index 1) and dword (index
2827
   2). Functions can be omitted with a NULL function pointer. The
2828
   registered functions may be modified dynamically later.
2829
   If io_index is non zero, the corresponding io zone is
2830
   modified. If it is zero, a new io zone is allocated. The return
2831
   value can be used with cpu_register_physical_memory(). (-1) is
2832
   returned if error. */
2833
int cpu_register_io_memory(int io_index,
2834
                           CPUReadMemoryFunc **mem_read,
2835
                           CPUWriteMemoryFunc **mem_write,
2836
                           void *opaque)
2837
{
2838
    int i, subwidth = 0;
2839

    
2840
    if (io_index <= 0) {
2841
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
2842
            return -1;
2843
        io_index = io_mem_nb++;
2844
    } else {
2845
        if (io_index >= IO_MEM_NB_ENTRIES)
2846
            return -1;
2847
    }
2848

    
2849
    for(i = 0;i < 3; i++) {
2850
        if (!mem_read[i] || !mem_write[i])
2851
            subwidth = IO_MEM_SUBWIDTH;
2852
        io_mem_read[io_index][i] = mem_read[i];
2853
        io_mem_write[io_index][i] = mem_write[i];
2854
    }
2855
    io_mem_opaque[io_index] = opaque;
2856
    return (io_index << IO_MEM_SHIFT) | subwidth;
2857
}
2858

    
2859
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2860
{
2861
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2862
}
2863

    
2864
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2865
{
2866
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2867
}
2868

    
2869
#endif /* !defined(CONFIG_USER_ONLY) */
2870

    
2871
/* physical memory access (slow version, mainly for debug) */
2872
#if defined(CONFIG_USER_ONLY)
2873
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2874
                            int len, int is_write)
2875
{
2876
    int l, flags;
2877
    target_ulong page;
2878
    void * p;
2879

    
2880
    while (len > 0) {
2881
        page = addr & TARGET_PAGE_MASK;
2882
        l = (page + TARGET_PAGE_SIZE) - addr;
2883
        if (l > len)
2884
            l = len;
2885
        flags = page_get_flags(page);
2886
        if (!(flags & PAGE_VALID))
2887
            return;
2888
        if (is_write) {
2889
            if (!(flags & PAGE_WRITE))
2890
                return;
2891
            /* XXX: this code should not depend on lock_user */
2892
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2893
                /* FIXME - should this return an error rather than just fail? */
2894
                return;
2895
            memcpy(p, buf, l);
2896
            unlock_user(p, addr, l);
2897
        } else {
2898
            if (!(flags & PAGE_READ))
2899
                return;
2900
            /* XXX: this code should not depend on lock_user */
2901
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2902
                /* FIXME - should this return an error rather than just fail? */
2903
                return;
2904
            memcpy(buf, p, l);
2905
            unlock_user(p, addr, 0);
2906
        }
2907
        len -= l;
2908
        buf += l;
2909
        addr += l;
2910
    }
2911
}
2912

    
2913
#else
2914
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2915
                            int len, int is_write)
2916
{
2917
    int l, io_index;
2918
    uint8_t *ptr;
2919
    uint32_t val;
2920
    target_phys_addr_t page;
2921
    unsigned long pd;
2922
    PhysPageDesc *p;
2923

    
2924
    while (len > 0) {
2925
        page = addr & TARGET_PAGE_MASK;
2926
        l = (page + TARGET_PAGE_SIZE) - addr;
2927
        if (l > len)
2928
            l = len;
2929
        p = phys_page_find(page >> TARGET_PAGE_BITS);
2930
        if (!p) {
2931
            pd = IO_MEM_UNASSIGNED;
2932
        } else {
2933
            pd = p->phys_offset;
2934
        }
2935

    
2936
        if (is_write) {
2937
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
2938
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2939
                if (p)
2940
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2941
                /* XXX: could force cpu_single_env to NULL to avoid
2942
                   potential bugs */
2943
                if (l >= 4 && ((addr & 3) == 0)) {
2944
                    /* 32 bit write access */
2945
                    val = ldl_p(buf);
2946
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
2947
                    l = 4;
2948
                } else if (l >= 2 && ((addr & 1) == 0)) {
2949
                    /* 16 bit write access */
2950
                    val = lduw_p(buf);
2951
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
2952
                    l = 2;
2953
                } else {
2954
                    /* 8 bit write access */
2955
                    val = ldub_p(buf);
2956
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
2957
                    l = 1;
2958
                }
2959
            } else {
2960
                unsigned long addr1;
2961
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
2962
                /* RAM case */
2963
                ptr = phys_ram_base + addr1;
2964
                memcpy(ptr, buf, l);
2965
                if (!cpu_physical_memory_is_dirty(addr1)) {
2966
                    /* invalidate code */
2967
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
2968
                    /* set dirty bit */
2969
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
2970
                        (0xff & ~CODE_DIRTY_FLAG);
2971
                }
2972
            }
2973
        } else {
2974
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
2975
                !(pd & IO_MEM_ROMD)) {
2976
                /* I/O case */
2977
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
2978
                if (p)
2979
                    addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
2980
                if (l >= 4 && ((addr & 3) == 0)) {
2981
                    /* 32 bit read access */
2982
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
2983
                    stl_p(buf, val);
2984
                    l = 4;
2985
                } else if (l >= 2 && ((addr & 1) == 0)) {
2986
                    /* 16 bit read access */
2987
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
2988
                    stw_p(buf, val);
2989
                    l = 2;
2990
                } else {
2991
                    /* 8 bit read access */
2992
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
2993
                    stb_p(buf, val);
2994
                    l = 1;
2995
                }
2996
            } else {
2997
                /* RAM case */
2998
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
2999
                    (addr & ~TARGET_PAGE_MASK);
3000
                memcpy(buf, ptr, l);
3001
            }
3002
        }
3003
        len -= l;
3004
        buf += l;
3005
        addr += l;
3006
    }
3007
}
3008

    
3009
/* used for ROM loading : can write in RAM and ROM */
3010
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3011
                                   const uint8_t *buf, int len)
3012
{
3013
    int l;
3014
    uint8_t *ptr;
3015
    target_phys_addr_t page;
3016
    unsigned long pd;
3017
    PhysPageDesc *p;
3018

    
3019
    while (len > 0) {
3020
        page = addr & TARGET_PAGE_MASK;
3021
        l = (page + TARGET_PAGE_SIZE) - addr;
3022
        if (l > len)
3023
            l = len;
3024
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3025
        if (!p) {
3026
            pd = IO_MEM_UNASSIGNED;
3027
        } else {
3028
            pd = p->phys_offset;
3029
        }
3030

    
3031
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3032
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3033
            !(pd & IO_MEM_ROMD)) {
3034
            /* do nothing */
3035
        } else {
3036
            unsigned long addr1;
3037
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3038
            /* ROM/RAM case */
3039
            ptr = phys_ram_base + addr1;
3040
            memcpy(ptr, buf, l);
3041
        }
3042
        len -= l;
3043
        buf += l;
3044
        addr += l;
3045
    }
3046
}
3047

    
3048

    
3049
/* warning: addr must be aligned */
3050
uint32_t ldl_phys(target_phys_addr_t addr)
3051
{
3052
    int io_index;
3053
    uint8_t *ptr;
3054
    uint32_t val;
3055
    unsigned long pd;
3056
    PhysPageDesc *p;
3057

    
3058
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3059
    if (!p) {
3060
        pd = IO_MEM_UNASSIGNED;
3061
    } else {
3062
        pd = p->phys_offset;
3063
    }
3064

    
3065
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3066
        !(pd & IO_MEM_ROMD)) {
3067
        /* I/O case */
3068
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3069
        if (p)
3070
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3071
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3072
    } else {
3073
        /* RAM case */
3074
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3075
            (addr & ~TARGET_PAGE_MASK);
3076
        val = ldl_p(ptr);
3077
    }
3078
    return val;
3079
}
3080

    
3081
/* warning: addr must be aligned */
3082
uint64_t ldq_phys(target_phys_addr_t addr)
3083
{
3084
    int io_index;
3085
    uint8_t *ptr;
3086
    uint64_t val;
3087
    unsigned long pd;
3088
    PhysPageDesc *p;
3089

    
3090
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3091
    if (!p) {
3092
        pd = IO_MEM_UNASSIGNED;
3093
    } else {
3094
        pd = p->phys_offset;
3095
    }
3096

    
3097
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3098
        !(pd & IO_MEM_ROMD)) {
3099
        /* I/O case */
3100
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3101
        if (p)
3102
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3103
#ifdef TARGET_WORDS_BIGENDIAN
3104
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
3105
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
3106
#else
3107
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
3108
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
3109
#endif
3110
    } else {
3111
        /* RAM case */
3112
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
3113
            (addr & ~TARGET_PAGE_MASK);
3114
        val = ldq_p(ptr);
3115
    }
3116
    return val;
3117
}
3118

    
3119
/* XXX: optimize */
3120
uint32_t ldub_phys(target_phys_addr_t addr)
3121
{
3122
    uint8_t val;
3123
    cpu_physical_memory_read(addr, &val, 1);
3124
    return val;
3125
}
3126

    
3127
/* XXX: optimize */
3128
uint32_t lduw_phys(target_phys_addr_t addr)
3129
{
3130
    uint16_t val;
3131
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
3132
    return tswap16(val);
3133
}
3134

    
3135
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

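/* 64-bit variant of stl_phys_notdirty.  MMIO writes are split into two
   32-bit accesses ordered by TARGET_WORDS_BIGENDIAN; the RAM path stores
   directly and, unlike the 32-bit variant, never touches the dirty
   bitmap. */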
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

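/* Regular 32-bit physical store.  Unlike stl_phys_notdirty, a store to
   RAM invalidates any translated code covering the modified range and
   sets the dirty flags (except CODE_DIRTY_FLAG). */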
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

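/* The byte, 16-bit and 64-bit store helpers below are unoptimized
   wrappers around cpu_physical_memory_write(); stw_phys and stq_phys
   convert the value to target byte order with tswap16()/tswap64()
   first. */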
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

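/* Walks a virtual address range page by page, translating each page
   with cpu_get_phys_page_debug() and copying at most up to the next
   page boundary per iteration; returns -1 if a page is unmapped. */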
/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

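/* Called when an MMIO access is made from the middle of a TB while
   icount is active: compute how many instructions of the current TB
   have already executed, invalidate the TB and regenerate it with
   CF_LAST_IO so that the I/O instruction terminates the new TB, then
   resume execution. */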
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

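/* Dump translation buffer statistics (generated code size, TB counts
   and sizes, cross-page TBs, direct jump usage) through the supplied
   cpu_fprintf callback; intended for the monitor's "info jit"
   command. */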
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

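/* Instantiate the code-access (_cmmu) softmmu load helpers: each
   inclusion of softmmu_template.h with SHIFT 0..3 generates the
   1/2/4/8-byte variant used when the translator fetches guest code
   through the soft MMU. */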
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif