Statistics
| Branch: | Revision:

root / exec.c @ 520860ef

History | View | Annotate | Download (109.5 kB)

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37
#include "qemu-common.h"
38
#include "tcg.h"
39
#include "hw/hw.h"
40
#include "osdep.h"
41
#include "kvm.h"
42
#if defined(CONFIG_USER_ONLY)
43
#include <qemu.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81
#endif
82

    
83
static TranslationBlock *tbs;
84
int code_gen_max_blocks;
85
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86
static int nb_tbs;
87
/* any access to the tbs or the page table must use this lock */
88
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89

    
90
#if defined(__arm__) || defined(__sparc_v9__)
91
/* The prologue must be reachable with a direct jump. ARM and Sparc64
92
 have limited branch ranges (possibly also PPC) so place it in a
93
 section close to code segment. */
94
#define code_gen_section                                \
95
    __attribute__((__section__(".gen_code")))           \
96
    __attribute__((aligned (32)))
97
#else
98
#define code_gen_section                                \
99
    __attribute__((aligned (32)))
100
#endif
101

    
102
uint8_t code_gen_prologue[1024] code_gen_section;
103
static uint8_t *code_gen_buffer;
104
static unsigned long code_gen_buffer_size;
105
/* threshold to flush the translated code buffer */
106
static unsigned long code_gen_buffer_max_size;
107
uint8_t *code_gen_ptr;
108

    
109
#if !defined(CONFIG_USER_ONLY)
110
ram_addr_t phys_ram_size;
111
int phys_ram_fd;
112
uint8_t *phys_ram_base;
113
uint8_t *phys_ram_dirty;
114
static int in_migration;
115
static ram_addr_t phys_ram_alloc_offset = 0;
116
#endif
117

    
118
CPUState *first_cpu;
119
/* current CPU in the current thread. It is only valid inside
120
   cpu_exec() */
121
CPUState *cpu_single_env;
122
/* 0 = Do not count executed instructions.
123
   1 = Precise instruction counting.
124
   2 = Adaptive rate instruction counting.  */
125
int use_icount = 0;
126
/* Current instruction counter.  While executing translated code this may
127
   include some instructions that have not yet been executed.  */
128
int64_t qemu_icount;
129

    
130
typedef struct PageDesc {
131
    /* list of TBs intersecting this ram page */
132
    TranslationBlock *first_tb;
133
    /* in order to optimize self modifying code, we count the number
134
       of lookups we do to a given page to use a bitmap */
135
    unsigned int code_write_count;
136
    uint8_t *code_bitmap;
137
#if defined(CONFIG_USER_ONLY)
138
    unsigned long flags;
139
#endif
140
} PageDesc;
141

    
142
typedef struct PhysPageDesc {
143
    /* offset in host memory of the page + io_index in the low bits */
144
    ram_addr_t phys_offset;
145
    ram_addr_t region_offset;
146
} PhysPageDesc;
147

    
148
#define L2_BITS 10
149
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
150
/* XXX: this is a temporary hack for alpha target.
151
 *      In the future, this is to be replaced by a multi-level table
152
 *      to actually be able to handle the complete 64 bits address space.
153
 */
154
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
155
#else
156
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
157
#endif
158

    
159
#define L1_SIZE (1 << L1_BITS)
160
#define L2_SIZE (1 << L2_BITS)
161

    
162
unsigned long qemu_real_host_page_size;
163
unsigned long qemu_host_page_bits;
164
unsigned long qemu_host_page_size;
165
unsigned long qemu_host_page_mask;
166

    
167
/* XXX: for system emulation, it could just be an array */
168
static PageDesc *l1_map[L1_SIZE];
169
static PhysPageDesc **l1_phys_map;
170

    
171
#if !defined(CONFIG_USER_ONLY)
172
static void io_mem_init(void);
173

    
174
/* io memory support */
175
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
176
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
177
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
178
static char io_mem_used[IO_MEM_NB_ENTRIES];
179
static int io_mem_watch;
180
#endif
181

    
182
/* log support */
183
static const char *logfilename = "/tmp/qemu.log";
184
FILE *logfile;
185
int loglevel;
186
static int log_append = 0;
187

    
188
/* statistics */
189
static int tlb_flush_count;
190
static int tb_flush_count;
191
static int tb_phys_invalidate_count;
192

    
193
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
194
typedef struct subpage_t {
195
    target_phys_addr_t base;
196
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
197
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
198
    void *opaque[TARGET_PAGE_SIZE][2][4];
199
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
200
} subpage_t;
201

    
202
#ifdef _WIN32
203
static void map_exec(void *addr, long size)
204
{
205
    DWORD old_protect;
206
    VirtualProtect(addr, size,
207
                   PAGE_EXECUTE_READWRITE, &old_protect);
208
    
209
}
210
#else
211
static void map_exec(void *addr, long size)
212
{
213
    unsigned long start, end, page_size;
214
    
215
    page_size = getpagesize();
216
    start = (unsigned long)addr;
217
    start &= ~(page_size - 1);
218
    
219
    end = (unsigned long)addr + size;
220
    end += page_size - 1;
221
    end &= ~(page_size - 1);
222
    
223
    mprotect((void *)start, end - start,
224
             PROT_READ | PROT_WRITE | PROT_EXEC);
225
}
226
#endif
227

    
228
static void page_init(void)
229
{
230
    /* NOTE: we can always suppose that qemu_host_page_size >=
231
       TARGET_PAGE_SIZE */
232
#ifdef _WIN32
233
    {
234
        SYSTEM_INFO system_info;
235

    
236
        GetSystemInfo(&system_info);
237
        qemu_real_host_page_size = system_info.dwPageSize;
238
    }
239
#else
240
    qemu_real_host_page_size = getpagesize();
241
#endif
242
    if (qemu_host_page_size == 0)
243
        qemu_host_page_size = qemu_real_host_page_size;
244
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
245
        qemu_host_page_size = TARGET_PAGE_SIZE;
246
    qemu_host_page_bits = 0;
247
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
248
        qemu_host_page_bits++;
249
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
250
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
251
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
252

    
253
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
254
    {
255
        long long startaddr, endaddr;
256
        FILE *f;
257
        int n;
258

    
259
        mmap_lock();
260
        last_brk = (unsigned long)sbrk(0);
261
        f = fopen("/proc/self/maps", "r");
262
        if (f) {
263
            do {
264
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
265
                if (n == 2) {
266
                    startaddr = MIN(startaddr,
267
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
268
                    endaddr = MIN(endaddr,
269
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
270
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
271
                                   TARGET_PAGE_ALIGN(endaddr),
272
                                   PAGE_RESERVED); 
273
                }
274
            } while (!feof(f));
275
            fclose(f);
276
        }
277
        mmap_unlock();
278
    }
279
#endif
280
}
281

    
282
static inline PageDesc **page_l1_map(target_ulong index)
283
{
284
#if TARGET_LONG_BITS > 32
285
    /* Host memory outside guest VM.  For 32-bit targets we have already
286
       excluded high addresses.  */
287
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
288
        return NULL;
289
#endif
290
    return &l1_map[index >> L2_BITS];
291
}
292

    
293
static inline PageDesc *page_find_alloc(target_ulong index)
294
{
295
    PageDesc **lp, *p;
296
    lp = page_l1_map(index);
297
    if (!lp)
298
        return NULL;
299

    
300
    p = *lp;
301
    if (!p) {
302
        /* allocate if not found */
303
#if defined(CONFIG_USER_ONLY)
304
        size_t len = sizeof(PageDesc) * L2_SIZE;
305
        /* Don't use qemu_malloc because it may recurse.  */
306
        p = mmap(0, len, PROT_READ | PROT_WRITE,
307
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
308
        *lp = p;
309
        if (h2g_valid(p)) {
310
            unsigned long addr = h2g(p);
311
            page_set_flags(addr & TARGET_PAGE_MASK,
312
                           TARGET_PAGE_ALIGN(addr + len),
313
                           PAGE_RESERVED); 
314
        }
315
#else
316
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
317
        *lp = p;
318
#endif
319
    }
320
    return p + (index & (L2_SIZE - 1));
321
}
322

    
323
static inline PageDesc *page_find(target_ulong index)
324
{
325
    PageDesc **lp, *p;
326
    lp = page_l1_map(index);
327
    if (!lp)
328
        return NULL;
329

    
330
    p = *lp;
331
    if (!p)
332
        return 0;
333
    return p + (index & (L2_SIZE - 1));
334
}
335

    
336
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
337
{
338
    void **lp, **p;
339
    PhysPageDesc *pd;
340

    
341
    p = (void **)l1_phys_map;
342
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
343

    
344
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
345
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
346
#endif
347
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
348
    p = *lp;
349
    if (!p) {
350
        /* allocate if not found */
351
        if (!alloc)
352
            return NULL;
353
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
354
        memset(p, 0, sizeof(void *) * L1_SIZE);
355
        *lp = p;
356
    }
357
#endif
358
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
359
    pd = *lp;
360
    if (!pd) {
361
        int i;
362
        /* allocate if not found */
363
        if (!alloc)
364
            return NULL;
365
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
366
        *lp = pd;
367
        for (i = 0; i < L2_SIZE; i++) {
368
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
369
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
370
        }
371
    }
372
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
373
}
374

    
375
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
376
{
377
    return phys_page_find_alloc(index, 0);
378
}
379

    
380
#if !defined(CONFIG_USER_ONLY)
381
static void tlb_protect_code(ram_addr_t ram_addr);
382
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
383
                                    target_ulong vaddr);
384
#define mmap_lock() do { } while(0)
385
#define mmap_unlock() do { } while(0)
386
#endif
387

    
388
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
389

    
390
#if defined(CONFIG_USER_ONLY)
391
/* Currently it is not recommanded to allocate big chunks of data in
392
   user mode. It will change when a dedicated libc will be used */
393
#define USE_STATIC_CODE_GEN_BUFFER
394
#endif
395

    
396
#ifdef USE_STATIC_CODE_GEN_BUFFER
397
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
398
#endif
399

    
400
static void code_gen_alloc(unsigned long tb_size)
401
{
402
#ifdef USE_STATIC_CODE_GEN_BUFFER
403
    code_gen_buffer = static_code_gen_buffer;
404
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
405
    map_exec(code_gen_buffer, code_gen_buffer_size);
406
#else
407
    code_gen_buffer_size = tb_size;
408
    if (code_gen_buffer_size == 0) {
409
#if defined(CONFIG_USER_ONLY)
410
        /* in user mode, phys_ram_size is not meaningful */
411
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
412
#else
413
        /* XXX: needs ajustments */
414
        code_gen_buffer_size = (unsigned long)(phys_ram_size / 4);
415
#endif
416
    }
417
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
418
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
419
    /* The code gen buffer location may have constraints depending on
420
       the host cpu and OS */
421
#if defined(__linux__) 
422
    {
423
        int flags;
424
        void *start = NULL;
425

    
426
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
427
#if defined(__x86_64__)
428
        flags |= MAP_32BIT;
429
        /* Cannot map more than that */
430
        if (code_gen_buffer_size > (800 * 1024 * 1024))
431
            code_gen_buffer_size = (800 * 1024 * 1024);
432
#elif defined(__sparc_v9__)
433
        // Map the buffer below 2G, so we can use direct calls and branches
434
        flags |= MAP_FIXED;
435
        start = (void *) 0x60000000UL;
436
        if (code_gen_buffer_size > (512 * 1024 * 1024))
437
            code_gen_buffer_size = (512 * 1024 * 1024);
438
#elif defined(__arm__)
439
        /* Map the buffer below 32M, so we can use direct calls and branches */
440
        flags |= MAP_FIXED;
441
        start = (void *) 0x01000000UL;
442
        if (code_gen_buffer_size > 16 * 1024 * 1024)
443
            code_gen_buffer_size = 16 * 1024 * 1024;
444
#endif
445
        code_gen_buffer = mmap(start, code_gen_buffer_size,
446
                               PROT_WRITE | PROT_READ | PROT_EXEC,
447
                               flags, -1, 0);
448
        if (code_gen_buffer == MAP_FAILED) {
449
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
450
            exit(1);
451
        }
452
    }
453
#elif defined(__FreeBSD__) || defined(__DragonFly__)
454
    {
455
        int flags;
456
        void *addr = NULL;
457
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
458
#if defined(__x86_64__)
459
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
460
         * 0x40000000 is free */
461
        flags |= MAP_FIXED;
462
        addr = (void *)0x40000000;
463
        /* Cannot map more than that */
464
        if (code_gen_buffer_size > (800 * 1024 * 1024))
465
            code_gen_buffer_size = (800 * 1024 * 1024);
466
#endif
467
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
468
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
469
                               flags, -1, 0);
470
        if (code_gen_buffer == MAP_FAILED) {
471
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
472
            exit(1);
473
        }
474
    }
475
#else
476
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
477
    map_exec(code_gen_buffer, code_gen_buffer_size);
478
#endif
479
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
480
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
481
    code_gen_buffer_max_size = code_gen_buffer_size - 
482
        code_gen_max_block_size();
483
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
484
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
485
}
486

    
487
/* Must be called before using the QEMU cpus. 'tb_size' is the size
488
   (in bytes) allocated to the translation buffer. Zero means default
489
   size. */
490
void cpu_exec_init_all(unsigned long tb_size)
491
{
492
    cpu_gen_init();
493
    code_gen_alloc(tb_size);
494
    code_gen_ptr = code_gen_buffer;
495
    page_init();
496
#if !defined(CONFIG_USER_ONLY)
497
    io_mem_init();
498
#endif
499
}
500

    
501
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
502

    
503
#define CPU_COMMON_SAVE_VERSION 1
504

    
505
static void cpu_common_save(QEMUFile *f, void *opaque)
506
{
507
    CPUState *env = opaque;
508

    
509
    qemu_put_be32s(f, &env->halted);
510
    qemu_put_be32s(f, &env->interrupt_request);
511
}
512

    
513
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
514
{
515
    CPUState *env = opaque;
516

    
517
    if (version_id != CPU_COMMON_SAVE_VERSION)
518
        return -EINVAL;
519

    
520
    qemu_get_be32s(f, &env->halted);
521
    qemu_get_be32s(f, &env->interrupt_request);
522
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
523
       version_id is increased. */
524
    env->interrupt_request &= ~0x01;
525
    tlb_flush(env, 1);
526

    
527
    return 0;
528
}
529
#endif
530

    
531
void cpu_exec_init(CPUState *env)
532
{
533
    CPUState **penv;
534
    int cpu_index;
535

    
536
#if defined(CONFIG_USER_ONLY)
537
    cpu_list_lock();
538
#endif
539
    env->next_cpu = NULL;
540
    penv = &first_cpu;
541
    cpu_index = 0;
542
    while (*penv != NULL) {
543
        penv = (CPUState **)&(*penv)->next_cpu;
544
        cpu_index++;
545
    }
546
    env->cpu_index = cpu_index;
547
    TAILQ_INIT(&env->breakpoints);
548
    TAILQ_INIT(&env->watchpoints);
549
    *penv = env;
550
#if defined(CONFIG_USER_ONLY)
551
    cpu_list_unlock();
552
#endif
553
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
554
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
555
                    cpu_common_save, cpu_common_load, env);
556
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
557
                    cpu_save, cpu_load, env);
558
#endif
559
}
560

    
561
static inline void invalidate_page_bitmap(PageDesc *p)
562
{
563
    if (p->code_bitmap) {
564
        qemu_free(p->code_bitmap);
565
        p->code_bitmap = NULL;
566
    }
567
    p->code_write_count = 0;
568
}
569

    
570
/* set to NULL all the 'first_tb' fields in all PageDescs */
571
static void page_flush_tb(void)
572
{
573
    int i, j;
574
    PageDesc *p;
575

    
576
    for(i = 0; i < L1_SIZE; i++) {
577
        p = l1_map[i];
578
        if (p) {
579
            for(j = 0; j < L2_SIZE; j++) {
580
                p->first_tb = NULL;
581
                invalidate_page_bitmap(p);
582
                p++;
583
            }
584
        }
585
    }
586
}
587

    
588
/* flush all the translation blocks */
589
/* XXX: tb_flush is currently not thread safe */
590
void tb_flush(CPUState *env1)
591
{
592
    CPUState *env;
593
#if defined(DEBUG_FLUSH)
594
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
595
           (unsigned long)(code_gen_ptr - code_gen_buffer),
596
           nb_tbs, nb_tbs > 0 ?
597
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
598
#endif
599
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
600
        cpu_abort(env1, "Internal error: code buffer overflow\n");
601

    
602
    nb_tbs = 0;
603

    
604
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
605
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
606
    }
607

    
608
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
609
    page_flush_tb();
610

    
611
    code_gen_ptr = code_gen_buffer;
612
    /* XXX: flush processor icache at this point if cache flush is
613
       expensive */
614
    tb_flush_count++;
615
}
616

    
617
#ifdef DEBUG_TB_CHECK
618

    
619
static void tb_invalidate_check(target_ulong address)
620
{
621
    TranslationBlock *tb;
622
    int i;
623
    address &= TARGET_PAGE_MASK;
624
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
625
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
626
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
627
                  address >= tb->pc + tb->size)) {
628
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
629
                       address, (long)tb->pc, tb->size);
630
            }
631
        }
632
    }
633
}
634

    
635
/* verify that all the pages have correct rights for code */
636
static void tb_page_check(void)
637
{
638
    TranslationBlock *tb;
639
    int i, flags1, flags2;
640

    
641
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
642
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
643
            flags1 = page_get_flags(tb->pc);
644
            flags2 = page_get_flags(tb->pc + tb->size - 1);
645
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
646
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
647
                       (long)tb->pc, tb->size, flags1, flags2);
648
            }
649
        }
650
    }
651
}
652

    
653
static void tb_jmp_check(TranslationBlock *tb)
654
{
655
    TranslationBlock *tb1;
656
    unsigned int n1;
657

    
658
    /* suppress any remaining jumps to this TB */
659
    tb1 = tb->jmp_first;
660
    for(;;) {
661
        n1 = (long)tb1 & 3;
662
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
663
        if (n1 == 2)
664
            break;
665
        tb1 = tb1->jmp_next[n1];
666
    }
667
    /* check end of list */
668
    if (tb1 != tb) {
669
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
670
    }
671
}
672

    
673
#endif
674

    
675
/* invalidate one TB */
676
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
677
                             int next_offset)
678
{
679
    TranslationBlock *tb1;
680
    for(;;) {
681
        tb1 = *ptb;
682
        if (tb1 == tb) {
683
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
684
            break;
685
        }
686
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
687
    }
688
}
689

    
690
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
691
{
692
    TranslationBlock *tb1;
693
    unsigned int n1;
694

    
695
    for(;;) {
696
        tb1 = *ptb;
697
        n1 = (long)tb1 & 3;
698
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
699
        if (tb1 == tb) {
700
            *ptb = tb1->page_next[n1];
701
            break;
702
        }
703
        ptb = &tb1->page_next[n1];
704
    }
705
}
706

    
707
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
708
{
709
    TranslationBlock *tb1, **ptb;
710
    unsigned int n1;
711

    
712
    ptb = &tb->jmp_next[n];
713
    tb1 = *ptb;
714
    if (tb1) {
715
        /* find tb(n) in circular list */
716
        for(;;) {
717
            tb1 = *ptb;
718
            n1 = (long)tb1 & 3;
719
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
720
            if (n1 == n && tb1 == tb)
721
                break;
722
            if (n1 == 2) {
723
                ptb = &tb1->jmp_first;
724
            } else {
725
                ptb = &tb1->jmp_next[n1];
726
            }
727
        }
728
        /* now we can suppress tb(n) from the list */
729
        *ptb = tb->jmp_next[n];
730

    
731
        tb->jmp_next[n] = NULL;
732
    }
733
}
734

    
735
/* reset the jump entry 'n' of a TB so that it is not chained to
736
   another TB */
737
static inline void tb_reset_jump(TranslationBlock *tb, int n)
738
{
739
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
740
}
741

    
742
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
743
{
744
    CPUState *env;
745
    PageDesc *p;
746
    unsigned int h, n1;
747
    target_phys_addr_t phys_pc;
748
    TranslationBlock *tb1, *tb2;
749

    
750
    /* remove the TB from the hash list */
751
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
752
    h = tb_phys_hash_func(phys_pc);
753
    tb_remove(&tb_phys_hash[h], tb,
754
              offsetof(TranslationBlock, phys_hash_next));
755

    
756
    /* remove the TB from the page list */
757
    if (tb->page_addr[0] != page_addr) {
758
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
759
        tb_page_remove(&p->first_tb, tb);
760
        invalidate_page_bitmap(p);
761
    }
762
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
763
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
764
        tb_page_remove(&p->first_tb, tb);
765
        invalidate_page_bitmap(p);
766
    }
767

    
768
    tb_invalidated_flag = 1;
769

    
770
    /* remove the TB from the hash list */
771
    h = tb_jmp_cache_hash_func(tb->pc);
772
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
773
        if (env->tb_jmp_cache[h] == tb)
774
            env->tb_jmp_cache[h] = NULL;
775
    }
776

    
777
    /* suppress this TB from the two jump lists */
778
    tb_jmp_remove(tb, 0);
779
    tb_jmp_remove(tb, 1);
780

    
781
    /* suppress any remaining jumps to this TB */
782
    tb1 = tb->jmp_first;
783
    for(;;) {
784
        n1 = (long)tb1 & 3;
785
        if (n1 == 2)
786
            break;
787
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
788
        tb2 = tb1->jmp_next[n1];
789
        tb_reset_jump(tb1, n1);
790
        tb1->jmp_next[n1] = NULL;
791
        tb1 = tb2;
792
    }
793
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
794

    
795
    tb_phys_invalidate_count++;
796
}
797

    
798
static inline void set_bits(uint8_t *tab, int start, int len)
799
{
800
    int end, mask, end1;
801

    
802
    end = start + len;
803
    tab += start >> 3;
804
    mask = 0xff << (start & 7);
805
    if ((start & ~7) == (end & ~7)) {
806
        if (start < end) {
807
            mask &= ~(0xff << (end & 7));
808
            *tab |= mask;
809
        }
810
    } else {
811
        *tab++ |= mask;
812
        start = (start + 8) & ~7;
813
        end1 = end & ~7;
814
        while (start < end1) {
815
            *tab++ = 0xff;
816
            start += 8;
817
        }
818
        if (start < end) {
819
            mask = ~(0xff << (end & 7));
820
            *tab |= mask;
821
        }
822
    }
823
}
824

    
825
static void build_page_bitmap(PageDesc *p)
826
{
827
    int n, tb_start, tb_end;
828
    TranslationBlock *tb;
829

    
830
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
831

    
832
    tb = p->first_tb;
833
    while (tb != NULL) {
834
        n = (long)tb & 3;
835
        tb = (TranslationBlock *)((long)tb & ~3);
836
        /* NOTE: this is subtle as a TB may span two physical pages */
837
        if (n == 0) {
838
            /* NOTE: tb_end may be after the end of the page, but
839
               it is not a problem */
840
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
841
            tb_end = tb_start + tb->size;
842
            if (tb_end > TARGET_PAGE_SIZE)
843
                tb_end = TARGET_PAGE_SIZE;
844
        } else {
845
            tb_start = 0;
846
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
847
        }
848
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
849
        tb = tb->page_next[n];
850
    }
851
}
852

    
853
TranslationBlock *tb_gen_code(CPUState *env,
854
                              target_ulong pc, target_ulong cs_base,
855
                              int flags, int cflags)
856
{
857
    TranslationBlock *tb;
858
    uint8_t *tc_ptr;
859
    target_ulong phys_pc, phys_page2, virt_page2;
860
    int code_gen_size;
861

    
862
    phys_pc = get_phys_addr_code(env, pc);
863
    tb = tb_alloc(pc);
864
    if (!tb) {
865
        /* flush must be done */
866
        tb_flush(env);
867
        /* cannot fail at this point */
868
        tb = tb_alloc(pc);
869
        /* Don't forget to invalidate previous TB info.  */
870
        tb_invalidated_flag = 1;
871
    }
872
    tc_ptr = code_gen_ptr;
873
    tb->tc_ptr = tc_ptr;
874
    tb->cs_base = cs_base;
875
    tb->flags = flags;
876
    tb->cflags = cflags;
877
    cpu_gen_code(env, tb, &code_gen_size);
878
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
879

    
880
    /* check next page if needed */
881
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
882
    phys_page2 = -1;
883
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
884
        phys_page2 = get_phys_addr_code(env, virt_page2);
885
    }
886
    tb_link_phys(tb, phys_pc, phys_page2);
887
    return tb;
888
}
889

    
890
/* invalidate all TBs which intersect with the target physical page
891
   starting in range [start;end[. NOTE: start and end must refer to
892
   the same physical page. 'is_cpu_write_access' should be true if called
893
   from a real cpu write access: the virtual CPU will exit the current
894
   TB if code is modified inside this TB. */
895
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
896
                                   int is_cpu_write_access)
897
{
898
    TranslationBlock *tb, *tb_next, *saved_tb;
899
    CPUState *env = cpu_single_env;
900
    target_ulong tb_start, tb_end;
901
    PageDesc *p;
902
    int n;
903
#ifdef TARGET_HAS_PRECISE_SMC
904
    int current_tb_not_found = is_cpu_write_access;
905
    TranslationBlock *current_tb = NULL;
906
    int current_tb_modified = 0;
907
    target_ulong current_pc = 0;
908
    target_ulong current_cs_base = 0;
909
    int current_flags = 0;
910
#endif /* TARGET_HAS_PRECISE_SMC */
911

    
912
    p = page_find(start >> TARGET_PAGE_BITS);
913
    if (!p)
914
        return;
915
    if (!p->code_bitmap &&
916
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
917
        is_cpu_write_access) {
918
        /* build code bitmap */
919
        build_page_bitmap(p);
920
    }
921

    
922
    /* we remove all the TBs in the range [start, end[ */
923
    /* XXX: see if in some cases it could be faster to invalidate all the code */
924
    tb = p->first_tb;
925
    while (tb != NULL) {
926
        n = (long)tb & 3;
927
        tb = (TranslationBlock *)((long)tb & ~3);
928
        tb_next = tb->page_next[n];
929
        /* NOTE: this is subtle as a TB may span two physical pages */
930
        if (n == 0) {
931
            /* NOTE: tb_end may be after the end of the page, but
932
               it is not a problem */
933
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
934
            tb_end = tb_start + tb->size;
935
        } else {
936
            tb_start = tb->page_addr[1];
937
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
938
        }
939
        if (!(tb_end <= start || tb_start >= end)) {
940
#ifdef TARGET_HAS_PRECISE_SMC
941
            if (current_tb_not_found) {
942
                current_tb_not_found = 0;
943
                current_tb = NULL;
944
                if (env->mem_io_pc) {
945
                    /* now we have a real cpu fault */
946
                    current_tb = tb_find_pc(env->mem_io_pc);
947
                }
948
            }
949
            if (current_tb == tb &&
950
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
951
                /* If we are modifying the current TB, we must stop
952
                its execution. We could be more precise by checking
953
                that the modification is after the current PC, but it
954
                would require a specialized function to partially
955
                restore the CPU state */
956

    
957
                current_tb_modified = 1;
958
                cpu_restore_state(current_tb, env,
959
                                  env->mem_io_pc, NULL);
960
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
961
                                     &current_flags);
962
            }
963
#endif /* TARGET_HAS_PRECISE_SMC */
964
            /* we need to do that to handle the case where a signal
965
               occurs while doing tb_phys_invalidate() */
966
            saved_tb = NULL;
967
            if (env) {
968
                saved_tb = env->current_tb;
969
                env->current_tb = NULL;
970
            }
971
            tb_phys_invalidate(tb, -1);
972
            if (env) {
973
                env->current_tb = saved_tb;
974
                if (env->interrupt_request && env->current_tb)
975
                    cpu_interrupt(env, env->interrupt_request);
976
            }
977
        }
978
        tb = tb_next;
979
    }
980
#if !defined(CONFIG_USER_ONLY)
981
    /* if no code remaining, no need to continue to use slow writes */
982
    if (!p->first_tb) {
983
        invalidate_page_bitmap(p);
984
        if (is_cpu_write_access) {
985
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
986
        }
987
    }
988
#endif
989
#ifdef TARGET_HAS_PRECISE_SMC
990
    if (current_tb_modified) {
991
        /* we generate a block containing just the instruction
992
           modifying the memory. It will ensure that it cannot modify
993
           itself */
994
        env->current_tb = NULL;
995
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
996
        cpu_resume_from_signal(env, NULL);
997
    }
998
#endif
999
}
1000

    
1001
/* len must be <= 8 and start must be a multiple of len */
1002
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1003
{
1004
    PageDesc *p;
1005
    int offset, b;
1006
#if 0
1007
    if (1) {
1008
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1009
                  cpu_single_env->mem_io_vaddr, len,
1010
                  cpu_single_env->eip,
1011
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1012
    }
1013
#endif
1014
    p = page_find(start >> TARGET_PAGE_BITS);
1015
    if (!p)
1016
        return;
1017
    if (p->code_bitmap) {
1018
        offset = start & ~TARGET_PAGE_MASK;
1019
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1020
        if (b & ((1 << len) - 1))
1021
            goto do_invalidate;
1022
    } else {
1023
    do_invalidate:
1024
        tb_invalidate_phys_page_range(start, start + len, 1);
1025
    }
1026
}
1027

    
1028
#if !defined(CONFIG_SOFTMMU)
1029
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1030
                                    unsigned long pc, void *puc)
1031
{
1032
    TranslationBlock *tb;
1033
    PageDesc *p;
1034
    int n;
1035
#ifdef TARGET_HAS_PRECISE_SMC
1036
    TranslationBlock *current_tb = NULL;
1037
    CPUState *env = cpu_single_env;
1038
    int current_tb_modified = 0;
1039
    target_ulong current_pc = 0;
1040
    target_ulong current_cs_base = 0;
1041
    int current_flags = 0;
1042
#endif
1043

    
1044
    addr &= TARGET_PAGE_MASK;
1045
    p = page_find(addr >> TARGET_PAGE_BITS);
1046
    if (!p)
1047
        return;
1048
    tb = p->first_tb;
1049
#ifdef TARGET_HAS_PRECISE_SMC
1050
    if (tb && pc != 0) {
1051
        current_tb = tb_find_pc(pc);
1052
    }
1053
#endif
1054
    while (tb != NULL) {
1055
        n = (long)tb & 3;
1056
        tb = (TranslationBlock *)((long)tb & ~3);
1057
#ifdef TARGET_HAS_PRECISE_SMC
1058
        if (current_tb == tb &&
1059
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1060
                /* If we are modifying the current TB, we must stop
1061
                   its execution. We could be more precise by checking
1062
                   that the modification is after the current PC, but it
1063
                   would require a specialized function to partially
1064
                   restore the CPU state */
1065

    
1066
            current_tb_modified = 1;
1067
            cpu_restore_state(current_tb, env, pc, puc);
1068
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1069
                                 &current_flags);
1070
        }
1071
#endif /* TARGET_HAS_PRECISE_SMC */
1072
        tb_phys_invalidate(tb, addr);
1073
        tb = tb->page_next[n];
1074
    }
1075
    p->first_tb = NULL;
1076
#ifdef TARGET_HAS_PRECISE_SMC
1077
    if (current_tb_modified) {
1078
        /* we generate a block containing just the instruction
1079
           modifying the memory. It will ensure that it cannot modify
1080
           itself */
1081
        env->current_tb = NULL;
1082
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1083
        cpu_resume_from_signal(env, puc);
1084
    }
1085
#endif
1086
}
1087
#endif
1088

    
1089
/* add the tb in the target page and protect it if necessary */
1090
static inline void tb_alloc_page(TranslationBlock *tb,
1091
                                 unsigned int n, target_ulong page_addr)
1092
{
1093
    PageDesc *p;
1094
    TranslationBlock *last_first_tb;
1095

    
1096
    tb->page_addr[n] = page_addr;
1097
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1098
    tb->page_next[n] = p->first_tb;
1099
    last_first_tb = p->first_tb;
1100
    p->first_tb = (TranslationBlock *)((long)tb | n);
1101
    invalidate_page_bitmap(p);
1102

    
1103
#if defined(TARGET_HAS_SMC) || 1
1104

    
1105
#if defined(CONFIG_USER_ONLY)
1106
    if (p->flags & PAGE_WRITE) {
1107
        target_ulong addr;
1108
        PageDesc *p2;
1109
        int prot;
1110

    
1111
        /* force the host page as non writable (writes will have a
1112
           page fault + mprotect overhead) */
1113
        page_addr &= qemu_host_page_mask;
1114
        prot = 0;
1115
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1116
            addr += TARGET_PAGE_SIZE) {
1117

    
1118
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1119
            if (!p2)
1120
                continue;
1121
            prot |= p2->flags;
1122
            p2->flags &= ~PAGE_WRITE;
1123
            page_get_flags(addr);
1124
          }
1125
        mprotect(g2h(page_addr), qemu_host_page_size,
1126
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1127
#ifdef DEBUG_TB_INVALIDATE
1128
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1129
               page_addr);
1130
#endif
1131
    }
1132
#else
1133
    /* if some code is already present, then the pages are already
1134
       protected. So we handle the case where only the first TB is
1135
       allocated in a physical page */
1136
    if (!last_first_tb) {
1137
        tlb_protect_code(page_addr);
1138
    }
1139
#endif
1140

    
1141
#endif /* TARGET_HAS_SMC */
1142
}
1143

    
1144
/* Allocate a new translation block. Flush the translation buffer if
1145
   too many translation blocks or too much generated code. */
1146
TranslationBlock *tb_alloc(target_ulong pc)
1147
{
1148
    TranslationBlock *tb;
1149

    
1150
    if (nb_tbs >= code_gen_max_blocks ||
1151
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1152
        return NULL;
1153
    tb = &tbs[nb_tbs++];
1154
    tb->pc = pc;
1155
    tb->cflags = 0;
1156
    return tb;
1157
}
1158

    
1159
void tb_free(TranslationBlock *tb)
1160
{
1161
    /* In practice this is mostly used for single use temporary TB
1162
       Ignore the hard cases and just back up if this TB happens to
1163
       be the last one generated.  */
1164
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1165
        code_gen_ptr = tb->tc_ptr;
1166
        nb_tbs--;
1167
    }
1168
}
1169

    
1170
/* add a new TB and link it to the physical page tables. phys_page2 is
1171
   (-1) to indicate that only one page contains the TB. */
1172
void tb_link_phys(TranslationBlock *tb,
1173
                  target_ulong phys_pc, target_ulong phys_page2)
1174
{
1175
    unsigned int h;
1176
    TranslationBlock **ptb;
1177

    
1178
    /* Grab the mmap lock to stop another thread invalidating this TB
1179
       before we are done.  */
1180
    mmap_lock();
1181
    /* add in the physical hash table */
1182
    h = tb_phys_hash_func(phys_pc);
1183
    ptb = &tb_phys_hash[h];
1184
    tb->phys_hash_next = *ptb;
1185
    *ptb = tb;
1186

    
1187
    /* add in the page list */
1188
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1189
    if (phys_page2 != -1)
1190
        tb_alloc_page(tb, 1, phys_page2);
1191
    else
1192
        tb->page_addr[1] = -1;
1193

    
1194
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1195
    tb->jmp_next[0] = NULL;
1196
    tb->jmp_next[1] = NULL;
1197

    
1198
    /* init original jump addresses */
1199
    if (tb->tb_next_offset[0] != 0xffff)
1200
        tb_reset_jump(tb, 0);
1201
    if (tb->tb_next_offset[1] != 0xffff)
1202
        tb_reset_jump(tb, 1);
1203

    
1204
#ifdef DEBUG_TB_CHECK
1205
    tb_page_check();
1206
#endif
1207
    mmap_unlock();
1208
}
1209

    
1210
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1211
   tb[1].tc_ptr. Return NULL if not found */
1212
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1213
{
1214
    int m_min, m_max, m;
1215
    unsigned long v;
1216
    TranslationBlock *tb;
1217

    
1218
    if (nb_tbs <= 0)
1219
        return NULL;
1220
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1221
        tc_ptr >= (unsigned long)code_gen_ptr)
1222
        return NULL;
1223
    /* binary search (cf Knuth) */
1224
    m_min = 0;
1225
    m_max = nb_tbs - 1;
1226
    while (m_min <= m_max) {
1227
        m = (m_min + m_max) >> 1;
1228
        tb = &tbs[m];
1229
        v = (unsigned long)tb->tc_ptr;
1230
        if (v == tc_ptr)
1231
            return tb;
1232
        else if (tc_ptr < v) {
1233
            m_max = m - 1;
1234
        } else {
1235
            m_min = m + 1;
1236
        }
1237
    }
1238
    return &tbs[m_max];
1239
}
1240

    
1241
static void tb_reset_jump_recursive(TranslationBlock *tb);
1242

    
1243
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1244
{
1245
    TranslationBlock *tb1, *tb_next, **ptb;
1246
    unsigned int n1;
1247

    
1248
    tb1 = tb->jmp_next[n];
1249
    if (tb1 != NULL) {
1250
        /* find head of list */
1251
        for(;;) {
1252
            n1 = (long)tb1 & 3;
1253
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1254
            if (n1 == 2)
1255
                break;
1256
            tb1 = tb1->jmp_next[n1];
1257
        }
1258
        /* we are now sure now that tb jumps to tb1 */
1259
        tb_next = tb1;
1260

    
1261
        /* remove tb from the jmp_first list */
1262
        ptb = &tb_next->jmp_first;
1263
        for(;;) {
1264
            tb1 = *ptb;
1265
            n1 = (long)tb1 & 3;
1266
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1267
            if (n1 == n && tb1 == tb)
1268
                break;
1269
            ptb = &tb1->jmp_next[n1];
1270
        }
1271
        *ptb = tb->jmp_next[n];
1272
        tb->jmp_next[n] = NULL;
1273

    
1274
        /* suppress the jump to next tb in generated code */
1275
        tb_reset_jump(tb, n);
1276

    
1277
        /* suppress jumps in the tb on which we could have jumped */
1278
        tb_reset_jump_recursive(tb_next);
1279
    }
1280
}
1281

    
1282
static void tb_reset_jump_recursive(TranslationBlock *tb)
1283
{
1284
    tb_reset_jump_recursive2(tb, 0);
1285
    tb_reset_jump_recursive2(tb, 1);
1286
}
1287

    
1288
#if defined(TARGET_HAS_ICE)
1289
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1290
{
1291
    target_phys_addr_t addr;
1292
    target_ulong pd;
1293
    ram_addr_t ram_addr;
1294
    PhysPageDesc *p;
1295

    
1296
    addr = cpu_get_phys_page_debug(env, pc);
1297
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1298
    if (!p) {
1299
        pd = IO_MEM_UNASSIGNED;
1300
    } else {
1301
        pd = p->phys_offset;
1302
    }
1303
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1304
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1305
}
1306
#endif
1307

    
1308
/* Add a watchpoint.  */
1309
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1310
                          int flags, CPUWatchpoint **watchpoint)
1311
{
1312
    target_ulong len_mask = ~(len - 1);
1313
    CPUWatchpoint *wp;
1314

    
1315
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1316
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1317
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1318
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1319
        return -EINVAL;
1320
    }
1321
    wp = qemu_malloc(sizeof(*wp));
1322

    
1323
    wp->vaddr = addr;
1324
    wp->len_mask = len_mask;
1325
    wp->flags = flags;
1326

    
1327
    /* keep all GDB-injected watchpoints in front */
1328
    if (flags & BP_GDB)
1329
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1330
    else
1331
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1332

    
1333
    tlb_flush_page(env, addr);
1334

    
1335
    if (watchpoint)
1336
        *watchpoint = wp;
1337
    return 0;
1338
}
1339

    
1340
/* Remove a specific watchpoint.  */
1341
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1342
                          int flags)
1343
{
1344
    target_ulong len_mask = ~(len - 1);
1345
    CPUWatchpoint *wp;
1346

    
1347
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1348
        if (addr == wp->vaddr && len_mask == wp->len_mask
1349
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1350
            cpu_watchpoint_remove_by_ref(env, wp);
1351
            return 0;
1352
        }
1353
    }
1354
    return -ENOENT;
1355
}
1356

    
1357
/* Remove a specific watchpoint by reference.  */
1358
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1359
{
1360
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1361

    
1362
    tlb_flush_page(env, watchpoint->vaddr);
1363

    
1364
    qemu_free(watchpoint);
1365
}
1366

    
1367
/* Remove all matching watchpoints.  */
1368
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1369
{
1370
    CPUWatchpoint *wp, *next;
1371

    
1372
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1373
        if (wp->flags & mask)
1374
            cpu_watchpoint_remove_by_ref(env, wp);
1375
    }
1376
}
1377

    
1378
/* Add a breakpoint.  */
1379
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1380
                          CPUBreakpoint **breakpoint)
1381
{
1382
#if defined(TARGET_HAS_ICE)
1383
    CPUBreakpoint *bp;
1384

    
1385
    bp = qemu_malloc(sizeof(*bp));
1386

    
1387
    bp->pc = pc;
1388
    bp->flags = flags;
1389

    
1390
    /* keep all GDB-injected breakpoints in front */
1391
    if (flags & BP_GDB)
1392
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1393
    else
1394
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1395

    
1396
    breakpoint_invalidate(env, pc);
1397

    
1398
    if (breakpoint)
1399
        *breakpoint = bp;
1400
    return 0;
1401
#else
1402
    return -ENOSYS;
1403
#endif
1404
}
1405

    
1406
/* Remove a specific breakpoint.  */
1407
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1408
{
1409
#if defined(TARGET_HAS_ICE)
1410
    CPUBreakpoint *bp;
1411

    
1412
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1413
        if (bp->pc == pc && bp->flags == flags) {
1414
            cpu_breakpoint_remove_by_ref(env, bp);
1415
            return 0;
1416
        }
1417
    }
1418
    return -ENOENT;
1419
#else
1420
    return -ENOSYS;
1421
#endif
1422
}
1423

    
1424
/* Remove a specific breakpoint by reference.  */
1425
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1426
{
1427
#if defined(TARGET_HAS_ICE)
1428
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1429

    
1430
    breakpoint_invalidate(env, breakpoint->pc);
1431

    
1432
    qemu_free(breakpoint);
1433
#endif
1434
}
1435

    
1436
/* Remove all matching breakpoints. */
1437
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1438
{
1439
#if defined(TARGET_HAS_ICE)
1440
    CPUBreakpoint *bp, *next;
1441

    
1442
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1443
        if (bp->flags & mask)
1444
            cpu_breakpoint_remove_by_ref(env, bp);
1445
    }
1446
#endif
1447
}
1448

    
1449
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1450
   CPU loop after each instruction */
1451
void cpu_single_step(CPUState *env, int enabled)
1452
{
1453
#if defined(TARGET_HAS_ICE)
1454
    if (env->singlestep_enabled != enabled) {
1455
        env->singlestep_enabled = enabled;
1456
        if (kvm_enabled())
1457
            kvm_update_guest_debug(env, 0);
1458
        else {
1459
            /* must flush all the translated code to avoid inconsistancies */
1460
            /* XXX: only flush what is necessary */
1461
            tb_flush(env);
1462
        }
1463
    }
1464
#endif
1465
}
1466

    
1467
/* enable or disable low levels log */
1468
void cpu_set_log(int log_flags)
1469
{
1470
    loglevel = log_flags;
1471
    if (loglevel && !logfile) {
1472
        logfile = fopen(logfilename, log_append ? "a" : "w");
1473
        if (!logfile) {
1474
            perror(logfilename);
1475
            _exit(1);
1476
        }
1477
#if !defined(CONFIG_SOFTMMU)
1478
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1479
        {
1480
            static char logfile_buf[4096];
1481
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1482
        }
1483
#else
1484
        setvbuf(logfile, NULL, _IOLBF, 0);
1485
#endif
1486
        log_append = 1;
1487
    }
1488
    if (!loglevel && logfile) {
1489
        fclose(logfile);
1490
        logfile = NULL;
1491
    }
1492
}
1493

    
1494
void cpu_set_log_filename(const char *filename)
1495
{
1496
    logfilename = strdup(filename);
1497
    if (logfile) {
1498
        fclose(logfile);
1499
        logfile = NULL;
1500
    }
1501
    cpu_set_log(loglevel);
1502
}
1503

    
1504
static void cpu_unlink_tb(CPUState *env)
1505
{
1506
#if defined(USE_NPTL)
1507
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1508
       problem and hope the cpu will stop of its own accord.  For userspace
1509
       emulation this often isn't actually as bad as it sounds.  Often
1510
       signals are used primarily to interrupt blocking syscalls.  */
1511
#else
1512
    TranslationBlock *tb;
1513
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1514

    
1515
    tb = env->current_tb;
1516
    /* if the cpu is currently executing code, we must unlink it and
1517
       all the potentially executing TB */
1518
    if (tb && !testandset(&interrupt_lock)) {
1519
        env->current_tb = NULL;
1520
        tb_reset_jump_recursive(tb);
1521
        resetlock(&interrupt_lock);
1522
    }
1523
#endif
1524
}
1525

    
1526
/* mask must never be zero, except for A20 change call */
1527
void cpu_interrupt(CPUState *env, int mask)
1528
{
1529
    int old_mask;
1530

    
1531
    old_mask = env->interrupt_request;
1532
    env->interrupt_request |= mask;
1533

    
1534
    if (use_icount) {
1535
        env->icount_decr.u16.high = 0xffff;
1536
#ifndef CONFIG_USER_ONLY
1537
        if (!can_do_io(env)
1538
            && (mask & ~old_mask) != 0) {
1539
            cpu_abort(env, "Raised interrupt while not in I/O function");
1540
        }
1541
#endif
1542
    } else {
1543
        cpu_unlink_tb(env);
1544
    }
1545
}
1546

    
1547
void cpu_reset_interrupt(CPUState *env, int mask)
1548
{
1549
    env->interrupt_request &= ~mask;
1550
}
1551

    
1552
void cpu_exit(CPUState *env)
1553
{
1554
    env->exit_request = 1;
1555
    cpu_unlink_tb(env);
1556
}
1557

    
1558
const CPULogItem cpu_log_items[] = {
1559
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1560
      "show generated host assembly code for each compiled TB" },
1561
    { CPU_LOG_TB_IN_ASM, "in_asm",
1562
      "show target assembly code for each compiled TB" },
1563
    { CPU_LOG_TB_OP, "op",
1564
      "show micro ops for each compiled TB" },
1565
    { CPU_LOG_TB_OP_OPT, "op_opt",
1566
      "show micro ops "
1567
#ifdef TARGET_I386
1568
      "before eflags optimization and "
1569
#endif
1570
      "after liveness analysis" },
1571
    { CPU_LOG_INT, "int",
1572
      "show interrupts/exceptions in short format" },
1573
    { CPU_LOG_EXEC, "exec",
1574
      "show trace before each executed TB (lots of logs)" },
1575
    { CPU_LOG_TB_CPU, "cpu",
1576
      "show CPU state before block translation" },
1577
#ifdef TARGET_I386
1578
    { CPU_LOG_PCALL, "pcall",
1579
      "show protected mode far calls/returns/exceptions" },
1580
    { CPU_LOG_RESET, "cpu_reset",
1581
      "show CPU state before CPU resets" },
1582
#endif
1583
#ifdef DEBUG_IOPORT
1584
    { CPU_LOG_IOPORT, "ioport",
1585
      "show all i/o ports accesses" },
1586
#endif
1587
    { 0, NULL, NULL },
1588
};
1589

    
1590
static int cmp1(const char *s1, int n, const char *s2)
1591
{
1592
    if (strlen(s2) != n)
1593
        return 0;
1594
    return memcmp(s1, s2, n) == 0;
1595
}
1596

    
1597
/* takes a comma separated list of log masks. Return 0 if error. */
1598
int cpu_str_to_log_mask(const char *str)
1599
{
1600
    const CPULogItem *item;
1601
    int mask;
1602
    const char *p, *p1;
1603

    
1604
    p = str;
1605
    mask = 0;
1606
    for(;;) {
1607
        p1 = strchr(p, ',');
1608
        if (!p1)
1609
            p1 = p + strlen(p);
1610
        if(cmp1(p,p1-p,"all")) {
1611
                for(item = cpu_log_items; item->mask != 0; item++) {
1612
                        mask |= item->mask;
1613
                }
1614
        } else {
1615
        for(item = cpu_log_items; item->mask != 0; item++) {
1616
            if (cmp1(p, p1 - p, item->name))
1617
                goto found;
1618
        }
1619
        return 0;
1620
        }
1621
    found:
1622
        mask |= item->mask;
1623
        if (*p1 != ',')
1624
            break;
1625
        p = p1 + 1;
1626
    }
1627
    return mask;
1628
}
1629

    
1630
void cpu_abort(CPUState *env, const char *fmt, ...)
1631
{
1632
    va_list ap;
1633
    va_list ap2;
1634

    
1635
    va_start(ap, fmt);
1636
    va_copy(ap2, ap);
1637
    fprintf(stderr, "qemu: fatal: ");
1638
    vfprintf(stderr, fmt, ap);
1639
    fprintf(stderr, "\n");
1640
#ifdef TARGET_I386
1641
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1642
#else
1643
    cpu_dump_state(env, stderr, fprintf, 0);
1644
#endif
1645
    if (qemu_log_enabled()) {
1646
        qemu_log("qemu: fatal: ");
1647
        qemu_log_vprintf(fmt, ap2);
1648
        qemu_log("\n");
1649
#ifdef TARGET_I386
1650
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1651
#else
1652
        log_cpu_state(env, 0);
1653
#endif
1654
        qemu_log_flush();
1655
        qemu_log_close();
1656
    }
1657
    va_end(ap2);
1658
    va_end(ap);
1659
    abort();
1660
}
1661

    
1662
CPUState *cpu_copy(CPUState *env)
1663
{
1664
    CPUState *new_env = cpu_init(env->cpu_model_str);
1665
    CPUState *next_cpu = new_env->next_cpu;
1666
    int cpu_index = new_env->cpu_index;
1667
#if defined(TARGET_HAS_ICE)
1668
    CPUBreakpoint *bp;
1669
    CPUWatchpoint *wp;
1670
#endif
1671

    
1672
    memcpy(new_env, env, sizeof(CPUState));
1673

    
1674
    /* Preserve chaining and index. */
1675
    new_env->next_cpu = next_cpu;
1676
    new_env->cpu_index = cpu_index;
1677

    
1678
    /* Clone all break/watchpoints.
1679
       Note: Once we support ptrace with hw-debug register access, make sure
1680
       BP_CPU break/watchpoints are handled correctly on clone. */
1681
    TAILQ_INIT(&env->breakpoints);
1682
    TAILQ_INIT(&env->watchpoints);
1683
#if defined(TARGET_HAS_ICE)
1684
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1685
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1686
    }
1687
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1688
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1689
                              wp->flags, NULL);
1690
    }
1691
#endif
1692

    
1693
    return new_env;
1694
}
1695

    
1696
#if !defined(CONFIG_USER_ONLY)
1697

    
1698
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1699
{
1700
    unsigned int i;
1701

    
1702
    /* Discard jump cache entries for any tb which might potentially
1703
       overlap the flushed page.  */
1704
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1705
    memset (&env->tb_jmp_cache[i], 0, 
1706
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1707

    
1708
    i = tb_jmp_cache_hash_page(addr);
1709
    memset (&env->tb_jmp_cache[i], 0, 
1710
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1711
}
1712

    
1713
/* NOTE: if flush_global is true, also flush global entries (not
1714
   implemented yet) */
1715
void tlb_flush(CPUState *env, int flush_global)
1716
{
1717
    int i;
1718

    
1719
#if defined(DEBUG_TLB)
1720
    printf("tlb_flush:\n");
1721
#endif
1722
    /* must reset current TB so that interrupts cannot modify the
1723
       links while we are modifying them */
1724
    env->current_tb = NULL;
1725

    
1726
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1727
        env->tlb_table[0][i].addr_read = -1;
1728
        env->tlb_table[0][i].addr_write = -1;
1729
        env->tlb_table[0][i].addr_code = -1;
1730
        env->tlb_table[1][i].addr_read = -1;
1731
        env->tlb_table[1][i].addr_write = -1;
1732
        env->tlb_table[1][i].addr_code = -1;
1733
#if (NB_MMU_MODES >= 3)
1734
        env->tlb_table[2][i].addr_read = -1;
1735
        env->tlb_table[2][i].addr_write = -1;
1736
        env->tlb_table[2][i].addr_code = -1;
1737
#endif
1738
#if (NB_MMU_MODES >= 4)
1739
        env->tlb_table[3][i].addr_read = -1;
1740
        env->tlb_table[3][i].addr_write = -1;
1741
        env->tlb_table[3][i].addr_code = -1;
1742
#endif
1743
#if (NB_MMU_MODES >= 5)
1744
        env->tlb_table[4][i].addr_read = -1;
1745
        env->tlb_table[4][i].addr_write = -1;
1746
        env->tlb_table[4][i].addr_code = -1;
1747
#endif
1748

    
1749
    }
1750

    
1751
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1752

    
1753
#ifdef USE_KQEMU
1754
    if (env->kqemu_enabled) {
1755
        kqemu_flush(env, flush_global);
1756
    }
1757
#endif
1758
    tlb_flush_count++;
1759
}
1760

    
1761
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1762
{
1763
    if (addr == (tlb_entry->addr_read &
1764
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1765
        addr == (tlb_entry->addr_write &
1766
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1767
        addr == (tlb_entry->addr_code &
1768
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1769
        tlb_entry->addr_read = -1;
1770
        tlb_entry->addr_write = -1;
1771
        tlb_entry->addr_code = -1;
1772
    }
1773
}
1774

    
1775
void tlb_flush_page(CPUState *env, target_ulong addr)
1776
{
1777
    int i;
1778

    
1779
#if defined(DEBUG_TLB)
1780
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1781
#endif
1782
    /* must reset current TB so that interrupts cannot modify the
1783
       links while we are modifying them */
1784
    env->current_tb = NULL;
1785

    
1786
    addr &= TARGET_PAGE_MASK;
1787
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1788
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1789
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1790
#if (NB_MMU_MODES >= 3)
1791
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1792
#endif
1793
#if (NB_MMU_MODES >= 4)
1794
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1795
#endif
1796
#if (NB_MMU_MODES >= 5)
1797
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1798
#endif
1799

    
1800
    tlb_flush_jmp_cache(env, addr);
1801

    
1802
#ifdef USE_KQEMU
1803
    if (env->kqemu_enabled) {
1804
        kqemu_flush_page(env, addr);
1805
    }
1806
#endif
1807
}
1808

    
1809
/* update the TLBs so that writes to code in the virtual page 'addr'
1810
   can be detected */
1811
static void tlb_protect_code(ram_addr_t ram_addr)
1812
{
1813
    cpu_physical_memory_reset_dirty(ram_addr,
1814
                                    ram_addr + TARGET_PAGE_SIZE,
1815
                                    CODE_DIRTY_FLAG);
1816
}
1817

    
1818
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1819
   tested for self modifying code */
1820
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1821
                                    target_ulong vaddr)
1822
{
1823
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1824
}
1825

    
1826
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1827
                                         unsigned long start, unsigned long length)
1828
{
1829
    unsigned long addr;
1830
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1831
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1832
        if ((addr - start) < length) {
1833
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1834
        }
1835
    }
1836
}
1837

    
1838
/* Note: start and end must be within the same ram block.  */
1839
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1840
                                     int dirty_flags)
1841
{
1842
    CPUState *env;
1843
    unsigned long length, start1;
1844
    int i, mask, len;
1845
    uint8_t *p;
1846

    
1847
    start &= TARGET_PAGE_MASK;
1848
    end = TARGET_PAGE_ALIGN(end);
1849

    
1850
    length = end - start;
1851
    if (length == 0)
1852
        return;
1853
    len = length >> TARGET_PAGE_BITS;
1854
#ifdef USE_KQEMU
1855
    /* XXX: should not depend on cpu context */
1856
    env = first_cpu;
1857
    if (env->kqemu_enabled) {
1858
        ram_addr_t addr;
1859
        addr = start;
1860
        for(i = 0; i < len; i++) {
1861
            kqemu_set_notdirty(env, addr);
1862
            addr += TARGET_PAGE_SIZE;
1863
        }
1864
    }
1865
#endif
1866
    mask = ~dirty_flags;
1867
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1868
    for(i = 0; i < len; i++)
1869
        p[i] &= mask;
1870

    
1871
    /* we modify the TLB cache so that the dirty bit will be set again
1872
       when accessing the range */
1873
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1874
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1876
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1877
            != (end - 1) - start) {
1878
        abort();
1879
    }
1880

    
1881
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1882
        for(i = 0; i < CPU_TLB_SIZE; i++)
1883
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1884
        for(i = 0; i < CPU_TLB_SIZE; i++)
1885
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1886
#if (NB_MMU_MODES >= 3)
1887
        for(i = 0; i < CPU_TLB_SIZE; i++)
1888
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1889
#endif
1890
#if (NB_MMU_MODES >= 4)
1891
        for(i = 0; i < CPU_TLB_SIZE; i++)
1892
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1893
#endif
1894
#if (NB_MMU_MODES >= 5)
1895
        for(i = 0; i < CPU_TLB_SIZE; i++)
1896
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1897
#endif
1898
    }
1899
}
1900

    
1901
int cpu_physical_memory_set_dirty_tracking(int enable)
1902
{
1903
    in_migration = enable;
1904
    return 0;
1905
}
1906

    
1907
int cpu_physical_memory_get_dirty_tracking(void)
1908
{
1909
    return in_migration;
1910
}
1911

    
1912
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1913
{
1914
    if (kvm_enabled())
1915
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1916
}
1917

    
1918
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1919
{
1920
    ram_addr_t ram_addr;
1921
    void *p;
1922

    
1923
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1924
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1925
            + tlb_entry->addend);
1926
        ram_addr = qemu_ram_addr_from_host(p);
1927
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1928
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1929
        }
1930
    }
1931
}
1932

    
1933
/* update the TLB according to the current state of the dirty bits */
1934
void cpu_tlb_update_dirty(CPUState *env)
1935
{
1936
    int i;
1937
    for(i = 0; i < CPU_TLB_SIZE; i++)
1938
        tlb_update_dirty(&env->tlb_table[0][i]);
1939
    for(i = 0; i < CPU_TLB_SIZE; i++)
1940
        tlb_update_dirty(&env->tlb_table[1][i]);
1941
#if (NB_MMU_MODES >= 3)
1942
    for(i = 0; i < CPU_TLB_SIZE; i++)
1943
        tlb_update_dirty(&env->tlb_table[2][i]);
1944
#endif
1945
#if (NB_MMU_MODES >= 4)
1946
    for(i = 0; i < CPU_TLB_SIZE; i++)
1947
        tlb_update_dirty(&env->tlb_table[3][i]);
1948
#endif
1949
#if (NB_MMU_MODES >= 5)
1950
    for(i = 0; i < CPU_TLB_SIZE; i++)
1951
        tlb_update_dirty(&env->tlb_table[4][i]);
1952
#endif
1953
}
1954

    
1955
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1956
{
1957
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1958
        tlb_entry->addr_write = vaddr;
1959
}
1960

    
1961
/* update the TLB corresponding to virtual page vaddr
1962
   so that it is no longer dirty */
1963
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1964
{
1965
    int i;
1966

    
1967
    vaddr &= TARGET_PAGE_MASK;
1968
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1969
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1970
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1971
#if (NB_MMU_MODES >= 3)
1972
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1973
#endif
1974
#if (NB_MMU_MODES >= 4)
1975
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1976
#endif
1977
#if (NB_MMU_MODES >= 5)
1978
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1979
#endif
1980
}
1981

    
1982
/* add a new TLB entry. At most one entry for a given virtual address
1983
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1984
   (can only happen in non SOFTMMU mode for I/O pages or pages
1985
   conflicting with the host address space). */
1986
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1987
                      target_phys_addr_t paddr, int prot,
1988
                      int mmu_idx, int is_softmmu)
1989
{
1990
    PhysPageDesc *p;
1991
    unsigned long pd;
1992
    unsigned int index;
1993
    target_ulong address;
1994
    target_ulong code_address;
1995
    target_phys_addr_t addend;
1996
    int ret;
1997
    CPUTLBEntry *te;
1998
    CPUWatchpoint *wp;
1999
    target_phys_addr_t iotlb;
2000

    
2001
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2002
    if (!p) {
2003
        pd = IO_MEM_UNASSIGNED;
2004
    } else {
2005
        pd = p->phys_offset;
2006
    }
2007
#if defined(DEBUG_TLB)
2008
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2009
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2010
#endif
2011

    
2012
    ret = 0;
2013
    address = vaddr;
2014
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2015
        /* IO memory case (romd handled later) */
2016
        address |= TLB_MMIO;
2017
    }
2018
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2019
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2020
        /* Normal RAM.  */
2021
        iotlb = pd & TARGET_PAGE_MASK;
2022
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2023
            iotlb |= IO_MEM_NOTDIRTY;
2024
        else
2025
            iotlb |= IO_MEM_ROM;
2026
    } else {
2027
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
2029
           of that region.  This would avoid having to special case RAM,
2030
           and avoid full address decoding in every device.
2031
           We can't use the high bits of pd for this because
2032
           IO_MEM_ROMD uses these as a ram address.  */
2033
        iotlb = (pd & ~TARGET_PAGE_MASK);
2034
        if (p) {
2035
            iotlb += p->region_offset;
2036
        } else {
2037
            iotlb += paddr;
2038
        }
2039
    }
2040

    
2041
    code_address = address;
2042
    /* Make accesses to pages with watchpoints go via the
2043
       watchpoint trap routines.  */
2044
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2045
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2046
            iotlb = io_mem_watch + paddr;
2047
            /* TODO: The memory case can be optimized by not trapping
2048
               reads of pages with a write breakpoint.  */
2049
            address |= TLB_MMIO;
2050
        }
2051
    }
2052

    
2053
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2054
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2055
    te = &env->tlb_table[mmu_idx][index];
2056
    te->addend = addend - vaddr;
2057
    if (prot & PAGE_READ) {
2058
        te->addr_read = address;
2059
    } else {
2060
        te->addr_read = -1;
2061
    }
2062

    
2063
    if (prot & PAGE_EXEC) {
2064
        te->addr_code = code_address;
2065
    } else {
2066
        te->addr_code = -1;
2067
    }
2068
    if (prot & PAGE_WRITE) {
2069
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2070
            (pd & IO_MEM_ROMD)) {
2071
            /* Write access calls the I/O callback.  */
2072
            te->addr_write = address | TLB_MMIO;
2073
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2074
                   !cpu_physical_memory_is_dirty(pd)) {
2075
            te->addr_write = address | TLB_NOTDIRTY;
2076
        } else {
2077
            te->addr_write = address;
2078
        }
2079
    } else {
2080
        te->addr_write = -1;
2081
    }
2082
    return ret;
2083
}
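
/* Usage sketch: a target's MMU fault path (its tlb_fill() helper) would
   translate the faulting virtual address and then install the mapping with
   tlb_set_page_exec().  "phys", "prot" and "mmu_idx" are placeholders for
   whatever the target page walk produced:

       target_ulong page_vaddr = vaddr & TARGET_PAGE_MASK;
       target_phys_addr_t phys;   (result of the target page walk)
       int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

       tlb_set_page_exec(env, page_vaddr, phys, prot, mmu_idx, 1);
*/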
2084

    
2085
#else
2086

    
2087
void tlb_flush(CPUState *env, int flush_global)
2088
{
2089
}
2090

    
2091
void tlb_flush_page(CPUState *env, target_ulong addr)
2092
{
2093
}
2094

    
2095
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2096
                      target_phys_addr_t paddr, int prot,
2097
                      int mmu_idx, int is_softmmu)
2098
{
2099
    return 0;
2100
}
2101

    
2102
/* dump memory mappings */
2103
void page_dump(FILE *f)
2104
{
2105
    unsigned long start, end;
2106
    int i, j, prot, prot1;
2107
    PageDesc *p;
2108

    
2109
    fprintf(f, "%-8s %-8s %-8s %s\n",
2110
            "start", "end", "size", "prot");
2111
    start = -1;
2112
    end = -1;
2113
    prot = 0;
2114
    for(i = 0; i <= L1_SIZE; i++) {
2115
        if (i < L1_SIZE)
2116
            p = l1_map[i];
2117
        else
2118
            p = NULL;
2119
        for(j = 0;j < L2_SIZE; j++) {
2120
            if (!p)
2121
                prot1 = 0;
2122
            else
2123
                prot1 = p[j].flags;
2124
            if (prot1 != prot) {
2125
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2126
                if (start != -1) {
2127
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2128
                            start, end, end - start,
2129
                            prot & PAGE_READ ? 'r' : '-',
2130
                            prot & PAGE_WRITE ? 'w' : '-',
2131
                            prot & PAGE_EXEC ? 'x' : '-');
2132
                }
2133
                if (prot1 != 0)
2134
                    start = end;
2135
                else
2136
                    start = -1;
2137
                prot = prot1;
2138
            }
2139
            if (!p)
2140
                break;
2141
        }
2142
    }
2143
}
2144

    
2145
int page_get_flags(target_ulong address)
2146
{
2147
    PageDesc *p;
2148

    
2149
    p = page_find(address >> TARGET_PAGE_BITS);
2150
    if (!p)
2151
        return 0;
2152
    return p->flags;
2153
}
2154

    
2155
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
2158
void page_set_flags(target_ulong start, target_ulong end, int flags)
2159
{
2160
    PageDesc *p;
2161
    target_ulong addr;
2162

    
2163
    /* mmap_lock should already be held.  */
2164
    start = start & TARGET_PAGE_MASK;
2165
    end = TARGET_PAGE_ALIGN(end);
2166
    if (flags & PAGE_WRITE)
2167
        flags |= PAGE_WRITE_ORG;
2168
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2169
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2170
        /* We may be called for host regions that are outside guest
2171
           address space.  */
2172
        if (!p)
2173
            return;
2174
        /* if the write protection is set, then we invalidate the code
2175
           inside */
2176
        if (!(p->flags & PAGE_WRITE) &&
2177
            (flags & PAGE_WRITE) &&
2178
            p->first_tb) {
2179
            tb_invalidate_phys_page(addr, 0, NULL);
2180
        }
2181
        p->flags = flags;
2182
    }
2183
}
2184

    
2185
int page_check_range(target_ulong start, target_ulong len, int flags)
2186
{
2187
    PageDesc *p;
2188
    target_ulong end;
2189
    target_ulong addr;
2190

    
2191
    if (start + len < start)
2192
        /* we've wrapped around */
2193
        return -1;
2194

    
2195
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2196
    start = start & TARGET_PAGE_MASK;
2197

    
2198
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2199
        p = page_find(addr >> TARGET_PAGE_BITS);
2200
        if( !p )
2201
            return -1;
2202
        if( !(p->flags & PAGE_VALID) )
2203
            return -1;
2204

    
2205
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2206
            return -1;
2207
        if (flags & PAGE_WRITE) {
2208
            if (!(p->flags & PAGE_WRITE_ORG))
2209
                return -1;
2210
            /* unprotect the page if it was put read-only because it
2211
               contains translated code */
2212
            if (!(p->flags & PAGE_WRITE)) {
2213
                if (!page_unprotect(addr, 0, NULL))
2214
                    return -1;
2215
            }
2216
            return 0;
2217
        }
2218
    }
2219
    return 0;
2220
}
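
/* Usage sketch: user-mode syscall emulation can validate a guest buffer
   before touching it.  guest_addr/guest_len stand in for the arguments of
   whatever syscall is being emulated, and the error value is whatever the
   caller normally reports (TARGET_EFAULT in the linux-user code):

       if (page_check_range(guest_addr, guest_len, PAGE_READ | PAGE_WRITE) < 0) {
           return -TARGET_EFAULT;
       }
       (the range can then be accessed through lock_user() or g2h())
*/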
2221

    
2222
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
2224
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2225
{
2226
    unsigned int page_index, prot, pindex;
2227
    PageDesc *p, *p1;
2228
    target_ulong host_start, host_end, addr;
2229

    
2230
    /* Technically this isn't safe inside a signal handler.  However we
2231
       know this only ever happens in a synchronous SEGV handler, so in
2232
       practice it seems to be ok.  */
2233
    mmap_lock();
2234

    
2235
    host_start = address & qemu_host_page_mask;
2236
    page_index = host_start >> TARGET_PAGE_BITS;
2237
    p1 = page_find(page_index);
2238
    if (!p1) {
2239
        mmap_unlock();
2240
        return 0;
2241
    }
2242
    host_end = host_start + qemu_host_page_size;
2243
    p = p1;
2244
    prot = 0;
2245
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2246
        prot |= p->flags;
2247
        p++;
2248
    }
2249
    /* if the page was really writable, then we change its
2250
       protection back to writable */
2251
    if (prot & PAGE_WRITE_ORG) {
2252
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2253
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2254
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2255
                     (prot & PAGE_BITS) | PAGE_WRITE);
2256
            p1[pindex].flags |= PAGE_WRITE;
2257
            /* and since the content will be modified, we must invalidate
2258
               the corresponding translated code. */
2259
            tb_invalidate_phys_page(address, pc, puc);
2260
#ifdef DEBUG_TB_CHECK
2261
            tb_invalidate_check(address);
2262
#endif
2263
            mmap_unlock();
2264
            return 1;
2265
        }
2266
    }
2267
    mmap_unlock();
2268
    return 0;
2269
}
2270

    
2271
static inline void tlb_set_dirty(CPUState *env,
2272
                                 unsigned long addr, target_ulong vaddr)
2273
{
2274
}
2275
#endif /* defined(CONFIG_USER_ONLY) */
2276

    
2277
#if !defined(CONFIG_USER_ONLY)
2278

    
2279
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2280
                             ram_addr_t memory, ram_addr_t region_offset);
2281
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2282
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2283
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2284
                      need_subpage)                                     \
2285
    do {                                                                \
2286
        if (addr > start_addr)                                          \
2287
            start_addr2 = 0;                                            \
2288
        else {                                                          \
2289
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2290
            if (start_addr2 > 0)                                        \
2291
                need_subpage = 1;                                       \
2292
        }                                                               \
2293
                                                                        \
2294
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2295
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2296
        else {                                                          \
2297
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2298
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2299
                need_subpage = 1;                                       \
2300
        }                                                               \
2301
    } while (0)
2302

    
2303
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
2310
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2311
                                         ram_addr_t size,
2312
                                         ram_addr_t phys_offset,
2313
                                         ram_addr_t region_offset)
2314
{
2315
    target_phys_addr_t addr, end_addr;
2316
    PhysPageDesc *p;
2317
    CPUState *env;
2318
    ram_addr_t orig_size = size;
2319
    void *subpage;
2320

    
2321
#ifdef USE_KQEMU
2322
    /* XXX: should not depend on cpu context */
2323
    env = first_cpu;
2324
    if (env->kqemu_enabled) {
2325
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2326
    }
2327
#endif
2328
    if (kvm_enabled())
2329
        kvm_set_phys_mem(start_addr, size, phys_offset);
2330

    
2331
    if (phys_offset == IO_MEM_UNASSIGNED) {
2332
        region_offset = start_addr;
2333
    }
2334
    region_offset &= TARGET_PAGE_MASK;
2335
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2336
    end_addr = start_addr + (target_phys_addr_t)size;
2337
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2338
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2339
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2340
            ram_addr_t orig_memory = p->phys_offset;
2341
            target_phys_addr_t start_addr2, end_addr2;
2342
            int need_subpage = 0;
2343

    
2344
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2345
                          need_subpage);
2346
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2347
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2348
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2349
                                           &p->phys_offset, orig_memory,
2350
                                           p->region_offset);
2351
                } else {
2352
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2353
                                            >> IO_MEM_SHIFT];
2354
                }
2355
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2356
                                 region_offset);
2357
                p->region_offset = 0;
2358
            } else {
2359
                p->phys_offset = phys_offset;
2360
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2361
                    (phys_offset & IO_MEM_ROMD))
2362
                    phys_offset += TARGET_PAGE_SIZE;
2363
            }
2364
        } else {
2365
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2366
            p->phys_offset = phys_offset;
2367
            p->region_offset = region_offset;
2368
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2369
                (phys_offset & IO_MEM_ROMD)) {
2370
                phys_offset += TARGET_PAGE_SIZE;
2371
            } else {
2372
                target_phys_addr_t start_addr2, end_addr2;
2373
                int need_subpage = 0;
2374

    
2375
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2376
                              end_addr2, need_subpage);
2377

    
2378
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2379
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2380
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2381
                                           addr & TARGET_PAGE_MASK);
2382
                    subpage_register(subpage, start_addr2, end_addr2,
2383
                                     phys_offset, region_offset);
2384
                    p->region_offset = 0;
2385
                }
2386
            }
2387
        }
2388
        region_offset += TARGET_PAGE_SIZE;
2389
    }
2390

    
2391
    /* since each CPU stores ram addresses in its TLB cache, we must
2392
       reset the modified entries */
2393
    /* XXX: slow ! */
2394
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2395
        tlb_flush(env, 1);
2396
    }
2397
}
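
/* Usage sketch: board code typically allocates guest RAM and then maps it
   into the guest physical address space; MMIO regions are mapped through
   an io index obtained from cpu_register_io_memory().  The addresses,
   ram_size and mmio_index below are placeholders for a real board:

       ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory_offset(0x00000000, ram_size,
                                           ram_offset | IO_MEM_RAM, 0);
       cpu_register_physical_memory_offset(0x10000000, 0x1000,
                                           mmio_index, 0);
*/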
2398

    
2399
/* XXX: temporary until new memory mapping API */
2400
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2401
{
2402
    PhysPageDesc *p;
2403

    
2404
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2405
    if (!p)
2406
        return IO_MEM_UNASSIGNED;
2407
    return p->phys_offset;
2408
}
2409

    
2410
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2411
{
2412
    if (kvm_enabled())
2413
        kvm_coalesce_mmio_region(addr, size);
2414
}
2415

    
2416
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2417
{
2418
    if (kvm_enabled())
2419
        kvm_uncoalesce_mmio_region(addr, size);
2420
}
2421

    
2422
/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}
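
/* Usage sketch: the returned value is an offset into the guest RAM area,
   not a host pointer.  It is what cpu_register_physical_memory_offset()
   expects as phys_offset, and qemu_get_ram_ptr() turns it back into a host
   pointer when a device needs direct access.  vram_size is a placeholder:

       ram_addr_t vram_offset = qemu_ram_alloc(vram_size);
       uint8_t *vram_host = qemu_get_ram_ptr(vram_offset);
*/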
2435

    
2436
void qemu_ram_free(ram_addr_t addr)
2437
{
2438
}
2439

    
2440
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2441
   With the exception of the softmmu code in this file, this should
2442
   only be used for local memory (e.g. video ram) that the device owns,
2443
   and knows it isn't going to access beyond the end of the block.
2444

2445
   It should not be used for general purpose DMA.
2446
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2447
 */
2448
void *qemu_get_ram_ptr(ram_addr_t addr)
2449
{
2450
    return phys_ram_base + addr;
2451
}
2452

    
2453
/* Some of the softmmu routines need to translate from a host pointer
2454
   (typically a TLB entry) back to a ram offset.  */
2455
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2456
{
2457
  return (uint8_t *)ptr - phys_ram_base;
2458
}
2459

    
2460
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2461
{
2462
#ifdef DEBUG_UNASSIGNED
2463
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2464
#endif
2465
#if defined(TARGET_SPARC)
2466
    do_unassigned_access(addr, 0, 0, 0, 1);
2467
#endif
2468
    return 0;
2469
}
2470

    
2471
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2472
{
2473
#ifdef DEBUG_UNASSIGNED
2474
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2475
#endif
2476
#if defined(TARGET_SPARC)
2477
    do_unassigned_access(addr, 0, 0, 0, 2);
2478
#endif
2479
    return 0;
2480
}
2481

    
2482
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2483
{
2484
#ifdef DEBUG_UNASSIGNED
2485
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2486
#endif
2487
#if defined(TARGET_SPARC)
2488
    do_unassigned_access(addr, 0, 0, 0, 4);
2489
#endif
2490
    return 0;
2491
}
2492

    
2493
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2494
{
2495
#ifdef DEBUG_UNASSIGNED
2496
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2497
#endif
2498
#if defined(TARGET_SPARC)
2499
    do_unassigned_access(addr, 1, 0, 0, 1);
2500
#endif
2501
}
2502

    
2503
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2504
{
2505
#ifdef DEBUG_UNASSIGNED
2506
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2507
#endif
2508
#if defined(TARGET_SPARC)
2509
    do_unassigned_access(addr, 1, 0, 0, 2);
2510
#endif
2511
}
2512

    
2513
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2514
{
2515
#ifdef DEBUG_UNASSIGNED
2516
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2517
#endif
2518
#if defined(TARGET_SPARC)
2519
    do_unassigned_access(addr, 1, 0, 0, 4);
2520
#endif
2521
}
2522

    
2523
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2524
    unassigned_mem_readb,
2525
    unassigned_mem_readw,
2526
    unassigned_mem_readl,
2527
};
2528

    
2529
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2530
    unassigned_mem_writeb,
2531
    unassigned_mem_writew,
2532
    unassigned_mem_writel,
2533
};
2534

    
2535
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2536
                                uint32_t val)
2537
{
2538
    int dirty_flags;
2539
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2540
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2541
#if !defined(CONFIG_USER_ONLY)
2542
        tb_invalidate_phys_page_fast(ram_addr, 1);
2543
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2544
#endif
2545
    }
2546
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2547
#ifdef USE_KQEMU
2548
    if (cpu_single_env->kqemu_enabled &&
2549
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2550
        kqemu_modify_page(cpu_single_env, ram_addr);
2551
#endif
2552
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2553
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2554
    /* we remove the notdirty callback only if the code has been
2555
       flushed */
2556
    if (dirty_flags == 0xff)
2557
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2558
}
2559

    
2560
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2561
                                uint32_t val)
2562
{
2563
    int dirty_flags;
2564
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2565
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2566
#if !defined(CONFIG_USER_ONLY)
2567
        tb_invalidate_phys_page_fast(ram_addr, 2);
2568
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2569
#endif
2570
    }
2571
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2572
#ifdef USE_KQEMU
2573
    if (cpu_single_env->kqemu_enabled &&
2574
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2575
        kqemu_modify_page(cpu_single_env, ram_addr);
2576
#endif
2577
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2578
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2579
    /* we remove the notdirty callback only if the code has been
2580
       flushed */
2581
    if (dirty_flags == 0xff)
2582
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2583
}
2584

    
2585
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2586
                                uint32_t val)
2587
{
2588
    int dirty_flags;
2589
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2590
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2591
#if !defined(CONFIG_USER_ONLY)
2592
        tb_invalidate_phys_page_fast(ram_addr, 4);
2593
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2594
#endif
2595
    }
2596
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2597
#ifdef USE_KQEMU
2598
    if (cpu_single_env->kqemu_enabled &&
2599
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2600
        kqemu_modify_page(cpu_single_env, ram_addr);
2601
#endif
2602
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2603
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2604
    /* we remove the notdirty callback only if the code has been
2605
       flushed */
2606
    if (dirty_flags == 0xff)
2607
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2608
}
2609

    
2610
static CPUReadMemoryFunc *error_mem_read[3] = {
2611
    NULL, /* never used */
2612
    NULL, /* never used */
2613
    NULL, /* never used */
2614
};
2615

    
2616
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2617
    notdirty_mem_writeb,
2618
    notdirty_mem_writew,
2619
    notdirty_mem_writel,
2620
};
2621

    
2622
/* Generate a debug exception if a watchpoint has been hit.  */
2623
static void check_watchpoint(int offset, int len_mask, int flags)
2624
{
2625
    CPUState *env = cpu_single_env;
2626
    target_ulong pc, cs_base;
2627
    TranslationBlock *tb;
2628
    target_ulong vaddr;
2629
    CPUWatchpoint *wp;
2630
    int cpu_flags;
2631

    
2632
    if (env->watchpoint_hit) {
2633
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2636
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2637
        return;
2638
    }
2639
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2640
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2641
        if ((vaddr == (wp->vaddr & len_mask) ||
2642
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2643
            wp->flags |= BP_WATCHPOINT_HIT;
2644
            if (!env->watchpoint_hit) {
2645
                env->watchpoint_hit = wp;
2646
                tb = tb_find_pc(env->mem_io_pc);
2647
                if (!tb) {
2648
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2649
                              "pc=%p", (void *)env->mem_io_pc);
2650
                }
2651
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2652
                tb_phys_invalidate(tb, -1);
2653
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2654
                    env->exception_index = EXCP_DEBUG;
2655
                } else {
2656
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2657
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2658
                }
2659
                cpu_resume_from_signal(env, NULL);
2660
            }
2661
        } else {
2662
            wp->flags &= ~BP_WATCHPOINT_HIT;
2663
        }
2664
    }
2665
}
2666

    
2667
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2668
   so these check for a hit then pass through to the normal out-of-line
2669
   phys routines.  */
2670
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2671
{
2672
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2673
    return ldub_phys(addr);
2674
}
2675

    
2676
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2677
{
2678
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2679
    return lduw_phys(addr);
2680
}
2681

    
2682
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2683
{
2684
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2685
    return ldl_phys(addr);
2686
}
2687

    
2688
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2689
                             uint32_t val)
2690
{
2691
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2692
    stb_phys(addr, val);
2693
}
2694

    
2695
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2696
                             uint32_t val)
2697
{
2698
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2699
    stw_phys(addr, val);
2700
}
2701

    
2702
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2703
                             uint32_t val)
2704
{
2705
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2706
    stl_phys(addr, val);
2707
}
2708

    
2709
static CPUReadMemoryFunc *watch_mem_read[3] = {
2710
    watch_mem_readb,
2711
    watch_mem_readw,
2712
    watch_mem_readl,
2713
};
2714

    
2715
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2716
    watch_mem_writeb,
2717
    watch_mem_writew,
2718
    watch_mem_writel,
2719
};
2720

    
2721
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2722
                                 unsigned int len)
2723
{
2724
    uint32_t ret;
2725
    unsigned int idx;
2726

    
2727
    idx = SUBPAGE_IDX(addr);
2728
#if defined(DEBUG_SUBPAGE)
2729
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2730
           mmio, len, addr, idx);
2731
#endif
2732
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2733
                                       addr + mmio->region_offset[idx][0][len]);
2734

    
2735
    return ret;
2736
}
2737

    
2738
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2739
                              uint32_t value, unsigned int len)
2740
{
2741
    unsigned int idx;
2742

    
2743
    idx = SUBPAGE_IDX(addr);
2744
#if defined(DEBUG_SUBPAGE)
2745
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2746
           mmio, len, addr, idx, value);
2747
#endif
2748
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2749
                                  addr + mmio->region_offset[idx][1][len],
2750
                                  value);
2751
}
2752

    
2753
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2754
{
2755
#if defined(DEBUG_SUBPAGE)
2756
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2757
#endif
2758

    
2759
    return subpage_readlen(opaque, addr, 0);
2760
}
2761

    
2762
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2763
                            uint32_t value)
2764
{
2765
#if defined(DEBUG_SUBPAGE)
2766
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2767
#endif
2768
    subpage_writelen(opaque, addr, value, 0);
2769
}
2770

    
2771
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2772
{
2773
#if defined(DEBUG_SUBPAGE)
2774
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2775
#endif
2776

    
2777
    return subpage_readlen(opaque, addr, 1);
2778
}
2779

    
2780
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2781
                            uint32_t value)
2782
{
2783
#if defined(DEBUG_SUBPAGE)
2784
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2785
#endif
2786
    subpage_writelen(opaque, addr, value, 1);
2787
}
2788

    
2789
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2790
{
2791
#if defined(DEBUG_SUBPAGE)
2792
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2793
#endif
2794

    
2795
    return subpage_readlen(opaque, addr, 2);
2796
}
2797

    
2798
static void subpage_writel (void *opaque,
2799
                         target_phys_addr_t addr, uint32_t value)
2800
{
2801
#if defined(DEBUG_SUBPAGE)
2802
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2803
#endif
2804
    subpage_writelen(opaque, addr, value, 2);
2805
}
2806

    
2807
static CPUReadMemoryFunc *subpage_read[] = {
2808
    &subpage_readb,
2809
    &subpage_readw,
2810
    &subpage_readl,
2811
};
2812

    
2813
static CPUWriteMemoryFunc *subpage_write[] = {
2814
    &subpage_writeb,
2815
    &subpage_writew,
2816
    &subpage_writel,
2817
};
2818

    
2819
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2820
                             ram_addr_t memory, ram_addr_t region_offset)
2821
{
2822
    int idx, eidx;
2823
    unsigned int i;
2824

    
2825
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2826
        return -1;
2827
    idx = SUBPAGE_IDX(start);
2828
    eidx = SUBPAGE_IDX(end);
2829
#if defined(DEBUG_SUBPAGE)
2830
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2831
           mmio, start, end, idx, eidx, memory);
2832
#endif
2833
    memory >>= IO_MEM_SHIFT;
2834
    for (; idx <= eidx; idx++) {
2835
        for (i = 0; i < 4; i++) {
2836
            if (io_mem_read[memory][i]) {
2837
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2838
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2839
                mmio->region_offset[idx][0][i] = region_offset;
2840
            }
2841
            if (io_mem_write[memory][i]) {
2842
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2843
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2844
                mmio->region_offset[idx][1][i] = region_offset;
2845
            }
2846
        }
2847
    }
2848

    
2849
    return 0;
2850
}
2851

    
2852
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2853
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2854
{
2855
    subpage_t *mmio;
2856
    int subpage_memory;
2857

    
2858
    mmio = qemu_mallocz(sizeof(subpage_t));
2859

    
2860
    mmio->base = base;
2861
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2862
#if defined(DEBUG_SUBPAGE)
2863
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2864
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2865
#endif
2866
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2867
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2868
                         region_offset);
2869

    
2870
    return mmio;
2871
}
2872

    
2873
static int get_free_io_mem_idx(void)
2874
{
2875
    int i;
2876

    
2877
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2878
        if (!io_mem_used[i]) {
2879
            io_mem_used[i] = 1;
2880
            return i;
2881
        }
2882

    
2883
    return -1;
2884
}
2885

    
2886
static void io_mem_init(void)
2887
{
2888
    int i;
2889

    
2890
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2891
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2892
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2893
    for (i=0; i<5; i++)
2894
        io_mem_used[i] = 1;
2895

    
2896
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2897
                                          watch_mem_write, NULL);
2898
    /* alloc dirty bits array */
2899
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
2900
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
2901
}
2902

    
2903
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}
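
/* Usage sketch: a device supplies byte/word/long handlers (index 0/1/2),
   registers them with io_index 0 to get a fresh io zone, and then uses the
   returned value as phys_offset when mapping the region.  The mydev_
   handlers, the state pointer s and the base address are placeholders for
   a real device model:

       static CPUReadMemoryFunc *mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,
       };
       static CPUWriteMemoryFunc *mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };

       int io = cpu_register_io_memory(0, mydev_read, mydev_write, s);
       cpu_register_physical_memory_offset(base, 0x1000, io, 0);
*/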
2936

    
2937
void cpu_unregister_io_memory(int io_table_address)
2938
{
2939
    int i;
2940
    int io_index = io_table_address >> IO_MEM_SHIFT;
2941

    
2942
    for (i=0;i < 3; i++) {
2943
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2944
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2945
    }
2946
    io_mem_opaque[io_index] = NULL;
2947
    io_mem_used[io_index] = 0;
2948
}
2949

    
2950
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
2951
{
2952
    return io_mem_write[io_index >> IO_MEM_SHIFT];
2953
}
2954

    
2955
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
2956
{
2957
    return io_mem_read[io_index >> IO_MEM_SHIFT];
2958
}
2959

    
2960
#endif /* !defined(CONFIG_USER_ONLY) */
2961

    
2962
/* physical memory access (slow version, mainly for debug) */
2963
#if defined(CONFIG_USER_ONLY)
2964
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2965
                            int len, int is_write)
2966
{
2967
    int l, flags;
2968
    target_ulong page;
2969
    void * p;
2970

    
2971
    while (len > 0) {
2972
        page = addr & TARGET_PAGE_MASK;
2973
        l = (page + TARGET_PAGE_SIZE) - addr;
2974
        if (l > len)
2975
            l = len;
2976
        flags = page_get_flags(page);
2977
        if (!(flags & PAGE_VALID))
2978
            return;
2979
        if (is_write) {
2980
            if (!(flags & PAGE_WRITE))
2981
                return;
2982
            /* XXX: this code should not depend on lock_user */
2983
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
2984
                /* FIXME - should this return an error rather than just fail? */
2985
                return;
2986
            memcpy(p, buf, l);
2987
            unlock_user(p, addr, l);
2988
        } else {
2989
            if (!(flags & PAGE_READ))
2990
                return;
2991
            /* XXX: this code should not depend on lock_user */
2992
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
2993
                /* FIXME - should this return an error rather than just fail? */
2994
                return;
2995
            memcpy(buf, p, l);
2996
            unlock_user(p, addr, 0);
2997
        }
2998
        len -= l;
2999
        buf += l;
3000
        addr += l;
3001
    }
3002
}
3003

    
3004
#else
3005
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3006
                            int len, int is_write)
3007
{
3008
    int l, io_index;
3009
    uint8_t *ptr;
3010
    uint32_t val;
3011
    target_phys_addr_t page;
3012
    unsigned long pd;
3013
    PhysPageDesc *p;
3014

    
3015
    while (len > 0) {
3016
        page = addr & TARGET_PAGE_MASK;
3017
        l = (page + TARGET_PAGE_SIZE) - addr;
3018
        if (l > len)
3019
            l = len;
3020
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3021
        if (!p) {
3022
            pd = IO_MEM_UNASSIGNED;
3023
        } else {
3024
            pd = p->phys_offset;
3025
        }
3026

    
3027
        if (is_write) {
3028
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3029
                target_phys_addr_t addr1 = addr;
3030
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3031
                if (p)
3032
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3033
                /* XXX: could force cpu_single_env to NULL to avoid
3034
                   potential bugs */
3035
                if (l >= 4 && ((addr1 & 3) == 0)) {
3036
                    /* 32 bit write access */
3037
                    val = ldl_p(buf);
3038
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3039
                    l = 4;
3040
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3041
                    /* 16 bit write access */
3042
                    val = lduw_p(buf);
3043
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3044
                    l = 2;
3045
                } else {
3046
                    /* 8 bit write access */
3047
                    val = ldub_p(buf);
3048
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3049
                    l = 1;
3050
                }
3051
            } else {
3052
                unsigned long addr1;
3053
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3054
                /* RAM case */
3055
                ptr = qemu_get_ram_ptr(addr1);
3056
                memcpy(ptr, buf, l);
3057
                if (!cpu_physical_memory_is_dirty(addr1)) {
3058
                    /* invalidate code */
3059
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3060
                    /* set dirty bit */
3061
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3062
                        (0xff & ~CODE_DIRTY_FLAG);
3063
                }
3064
            }
3065
        } else {
3066
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3067
                !(pd & IO_MEM_ROMD)) {
3068
                target_phys_addr_t addr1 = addr;
3069
                /* I/O case */
3070
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3071
                if (p)
3072
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3073
                if (l >= 4 && ((addr1 & 3) == 0)) {
3074
                    /* 32 bit read access */
3075
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3076
                    stl_p(buf, val);
3077
                    l = 4;
3078
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3079
                    /* 16 bit read access */
3080
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3081
                    stw_p(buf, val);
3082
                    l = 2;
3083
                } else {
3084
                    /* 8 bit read access */
3085
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3086
                    stb_p(buf, val);
3087
                    l = 1;
3088
                }
3089
            } else {
3090
                /* RAM case */
3091
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3092
                    (addr & ~TARGET_PAGE_MASK);
3093
                memcpy(buf, ptr, l);
3094
            }
3095
        }
3096
        len -= l;
3097
        buf += l;
3098
        addr += l;
3099
    }
3100
}
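
/* Usage sketch: devices and debug code use this as the generic, slow path
   to guest physical memory (the cpu_physical_memory_read()/write() helpers
   in cpu-all.h are assumed to be thin wrappers around it).  desc_paddr is a
   placeholder for an address the device got from the guest, e.g. out of a
   DMA descriptor ring:

       uint32_t desc[4];
       cpu_physical_memory_rw(desc_paddr, (uint8_t *)desc, sizeof(desc), 0);
       (process the descriptor, then write back status)
       cpu_physical_memory_rw(desc_paddr, (uint8_t *)desc, sizeof(desc), 1);
*/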
3101

    
3102
/* used for ROM loading: can write in RAM and ROM */
3103
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3104
                                   const uint8_t *buf, int len)
3105
{
3106
    int l;
3107
    uint8_t *ptr;
3108
    target_phys_addr_t page;
3109
    unsigned long pd;
3110
    PhysPageDesc *p;
3111

    
3112
    while (len > 0) {
3113
        page = addr & TARGET_PAGE_MASK;
3114
        l = (page + TARGET_PAGE_SIZE) - addr;
3115
        if (l > len)
3116
            l = len;
3117
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3118
        if (!p) {
3119
            pd = IO_MEM_UNASSIGNED;
3120
        } else {
3121
            pd = p->phys_offset;
3122
        }
3123

    
3124
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3125
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3126
            !(pd & IO_MEM_ROMD)) {
3127
            /* do nothing */
3128
        } else {
3129
            unsigned long addr1;
3130
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3131
            /* ROM/RAM case */
3132
            ptr = qemu_get_ram_ptr(addr1);
3133
            memcpy(ptr, buf, l);
3134
        }
3135
        len -= l;
3136
        buf += l;
3137
        addr += l;
3138
    }
3139
}
3140

    
3141
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

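/*
 * Illustrative sketch (not part of the original source): a caller whose
 * cpu_physical_memory_map() attempt fails (returns NULL) can register a
 * callback that fires once the bounce buffer is released, i.e. when a retry
 * is likely to succeed.  Note that cpu_notify_map_clients() removes the
 * client itself after invoking the callback.  The names below are
 * hypothetical.
 */
#if 0
static void example_retry_map(void *opaque)
{
    /* opaque is what was passed to cpu_register_map_client();
       retry the mapping for that pending request here */
}

static void example_schedule_map_retry(void *pending_request)
{
    cpu_register_map_client(pending_request, example_retry_map);
}
#endif
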
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

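/*
 * Illustrative sketch (not part of the original source): the intended
 * map/use/unmap pattern for zero-copy ("DMA-style") access to guest RAM.
 * do_host_io() is a hypothetical placeholder for whatever the caller does
 * with the mapped region.
 */
#if 0
static void example_dma_write(target_phys_addr_t addr, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *host = cpu_physical_memory_map(addr, &plen, 1 /* is_write */);

    if (!host) {
        /* resources exhausted: register a map client and retry later */
        return;
    }
    do_host_io(host, plen);                 /* plen may be shorter than len */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif
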
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

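/* Note: the second index into io_mem_read[]/io_mem_write[] selects the access
   size (0 = 8 bit, 1 = 16 bit, 2 = 32 bit), matching the byte/word/long cases
   in cpu_physical_memory_rw() above.  ldq_phys() below therefore assembles a
   64 bit value from two 32 bit device reads, ordered according to
   TARGET_WORDS_BIGENDIAN. */
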
/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

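/*
 * Illustrative sketch (not part of the original source): target code that
 * walks guest page tables can read a PTE with ldl_phys() and update its
 * status bits with stl_phys_notdirty(), so that the write itself does not
 * disturb the dirty tracking described above.  The bit mask below is a
 * hypothetical placeholder.
 */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical ACCESSED bit */);
}
#endif
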
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

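/*
 * Illustrative sketch (not part of the original source): a debugger front
 * end (e.g. a gdb stub) can read guest virtual memory through the MMU with
 * cpu_memory_rw_debug(); a -1 return means the page is not mapped.  The
 * wrapper below is hypothetical.
 */
#if 0
static int example_read_guest_virt(CPUState *env, target_ulong vaddr,
                                   uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif
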
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

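/*
 * Illustrative sketch (not part of the original source): dump_exec_info()
 * only formats output through the cpu_fprintf callback, so it can be driven
 * by a monitor command (such as "info jit") or, as below, dumped straight
 * to stdout with plain fprintf.
 */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stdout, fprintf);
}
#endif
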
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

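/* Note: each inclusion of softmmu_template.h below instantiates the slow-path
   load helpers for a (1 << SHIFT)-byte access.  With SOFTMMU_CODE_ACCESS
   defined these are the code-fetch variants (hence the _cmmu MMU suffix and
   the NULL GETPC(), since they are not called from generated code). */
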
#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif