root / exec.c @ ef845c3b

/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
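
/* Page descriptors are looked up in two levels: the upper bits of a
   page index select an entry in l1_map (or l1_phys_map), which points
   to an array of L2_SIZE descriptors allocated on demand, and the low
   L2_BITS select the descriptor inside that array. */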

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
static const char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

    
349
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
350
{
351
    void **lp, **p;
352
    PhysPageDesc *pd;
353

    
354
    p = (void **)l1_phys_map;
355
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
356

    
357
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
358
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359
#endif
360
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
361
    p = *lp;
362
    if (!p) {
363
        /* allocate if not found */
364
        if (!alloc)
365
            return NULL;
366
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
367
        memset(p, 0, sizeof(void *) * L1_SIZE);
368
        *lp = p;
369
    }
370
#endif
371
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372
    pd = *lp;
373
    if (!pd) {
374
        int i;
375
        /* allocate if not found */
376
        if (!alloc)
377
            return NULL;
378
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
379
        *lp = pd;
380
        for (i = 0; i < L2_SIZE; i++) {
381
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
382
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
383
        }
384
    }
385
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
386
}
387

    
388
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
389
{
390
    return phys_page_find_alloc(index, 0);
391
}
392

    
393
#if !defined(CONFIG_USER_ONLY)
394
static void tlb_protect_code(ram_addr_t ram_addr);
395
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
396
                                    target_ulong vaddr);
397
#define mmap_lock() do { } while(0)
398
#define mmap_unlock() do { } while(0)
399
#endif
400

    
401
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
402

    
403
#if defined(CONFIG_USER_ONLY)
404
/* Currently it is not recommended to allocate big chunks of data in
405
   user mode. It will change when a dedicated libc will be used */
406
#define USE_STATIC_CODE_GEN_BUFFER
407
#endif
408

    
409
#ifdef USE_STATIC_CODE_GEN_BUFFER
410
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
411
#endif
412

    
413
static void code_gen_alloc(unsigned long tb_size)
414
{
415
#ifdef USE_STATIC_CODE_GEN_BUFFER
416
    code_gen_buffer = static_code_gen_buffer;
417
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
418
    map_exec(code_gen_buffer, code_gen_buffer_size);
419
#else
420
    code_gen_buffer_size = tb_size;
421
    if (code_gen_buffer_size == 0) {
422
#if defined(CONFIG_USER_ONLY)
423
        /* in user mode, phys_ram_size is not meaningful */
424
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
425
#else
426
        /* XXX: needs adjustments */
427
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
428
#endif
429
    }
430
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
431
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
432
    /* The code gen buffer location may have constraints depending on
433
       the host cpu and OS */
434
#if defined(__linux__) 
435
    {
436
        int flags;
437
        void *start = NULL;
438

    
439
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
440
#if defined(__x86_64__)
441
        flags |= MAP_32BIT;
442
        /* Cannot map more than that */
443
        if (code_gen_buffer_size > (800 * 1024 * 1024))
444
            code_gen_buffer_size = (800 * 1024 * 1024);
445
#elif defined(__sparc_v9__)
446
        // Map the buffer below 2G, so we can use direct calls and branches
447
        flags |= MAP_FIXED;
448
        start = (void *) 0x60000000UL;
449
        if (code_gen_buffer_size > (512 * 1024 * 1024))
450
            code_gen_buffer_size = (512 * 1024 * 1024);
451
#elif defined(__arm__)
452
        /* Map the buffer below 32M, so we can use direct calls and branches */
453
        flags |= MAP_FIXED;
454
        start = (void *) 0x01000000UL;
455
        if (code_gen_buffer_size > 16 * 1024 * 1024)
456
            code_gen_buffer_size = 16 * 1024 * 1024;
457
#endif
458
        code_gen_buffer = mmap(start, code_gen_buffer_size,
459
                               PROT_WRITE | PROT_READ | PROT_EXEC,
460
                               flags, -1, 0);
461
        if (code_gen_buffer == MAP_FAILED) {
462
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463
            exit(1);
464
        }
465
    }
466
#elif defined(__FreeBSD__) || defined(__DragonFly__)
467
    {
468
        int flags;
469
        void *addr = NULL;
470
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
471
#if defined(__x86_64__)
472
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
473
         * 0x40000000 is free */
474
        flags |= MAP_FIXED;
475
        addr = (void *)0x40000000;
476
        /* Cannot map more than that */
477
        if (code_gen_buffer_size > (800 * 1024 * 1024))
478
            code_gen_buffer_size = (800 * 1024 * 1024);
479
#endif
480
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
481
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
482
                               flags, -1, 0);
483
        if (code_gen_buffer == MAP_FAILED) {
484
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485
            exit(1);
486
        }
487
    }
488
#else
489
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
490
    map_exec(code_gen_buffer, code_gen_buffer_size);
491
#endif
492
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
493
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
494
    code_gen_buffer_max_size = code_gen_buffer_size - 
495
        code_gen_max_block_size();
496
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
497
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
498
}
499

    
500
/* Must be called before using the QEMU cpus. 'tb_size' is the size
501
   (in bytes) allocated to the translation buffer. Zero means default
502
   size. */
503
void cpu_exec_init_all(unsigned long tb_size)
504
{
505
    cpu_gen_init();
506
    code_gen_alloc(tb_size);
507
    code_gen_ptr = code_gen_buffer;
508
    page_init();
509
#if !defined(CONFIG_USER_ONLY)
510
    io_mem_init();
511
#endif
512
}
513

    
514
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
515

    
516
static void cpu_common_pre_save(void *opaque)
517
{
518
    CPUState *env = opaque;
519

    
520
    cpu_synchronize_state(env);
521
}
522

    
523
static int cpu_common_pre_load(void *opaque)
524
{
525
    CPUState *env = opaque;
526

    
527
    cpu_synchronize_state(env);
528
    return 0;
529
}
530

    
531
static int cpu_common_post_load(void *opaque, int version_id)
532
{
533
    CPUState *env = opaque;
534

    
535
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
536
       version_id is increased. */
537
    env->interrupt_request &= ~0x01;
538
    tlb_flush(env, 1);
539

    
540
    return 0;
541
}
542

    
543
static const VMStateDescription vmstate_cpu_common = {
544
    .name = "cpu_common",
545
    .version_id = 1,
546
    .minimum_version_id = 1,
547
    .minimum_version_id_old = 1,
548
    .pre_save = cpu_common_pre_save,
549
    .pre_load = cpu_common_pre_load,
550
    .post_load = cpu_common_post_load,
551
    .fields      = (VMStateField []) {
552
        VMSTATE_UINT32(halted, CPUState),
553
        VMSTATE_UINT32(interrupt_request, CPUState),
554
        VMSTATE_END_OF_LIST()
555
    }
556
};
557
#endif
558

    
559
CPUState *qemu_get_cpu(int cpu)
560
{
561
    CPUState *env = first_cpu;
562

    
563
    while (env) {
564
        if (env->cpu_index == cpu)
565
            break;
566
        env = env->next_cpu;
567
    }
568

    
569
    return env;
570
}
571

    
572
void cpu_exec_init(CPUState *env)
573
{
574
    CPUState **penv;
575
    int cpu_index;
576

    
577
#if defined(CONFIG_USER_ONLY)
578
    cpu_list_lock();
579
#endif
580
    env->next_cpu = NULL;
581
    penv = &first_cpu;
582
    cpu_index = 0;
583
    while (*penv != NULL) {
584
        penv = &(*penv)->next_cpu;
585
        cpu_index++;
586
    }
587
    env->cpu_index = cpu_index;
588
    env->numa_node = 0;
589
    QTAILQ_INIT(&env->breakpoints);
590
    QTAILQ_INIT(&env->watchpoints);
591
    *penv = env;
592
#if defined(CONFIG_USER_ONLY)
593
    cpu_list_unlock();
594
#endif
595
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
596
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
597
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
598
                    cpu_save, cpu_load, env);
599
#endif
600
}
601

    
602
static inline void invalidate_page_bitmap(PageDesc *p)
603
{
604
    if (p->code_bitmap) {
605
        qemu_free(p->code_bitmap);
606
        p->code_bitmap = NULL;
607
    }
608
    p->code_write_count = 0;
609
}
610

    
611
/* set to NULL all the 'first_tb' fields in all PageDescs */
612
static void page_flush_tb(void)
613
{
614
    int i, j;
615
    PageDesc *p;
616

    
617
    for(i = 0; i < L1_SIZE; i++) {
618
        p = l1_map[i];
619
        if (p) {
620
            for(j = 0; j < L2_SIZE; j++) {
621
                p->first_tb = NULL;
622
                invalidate_page_bitmap(p);
623
                p++;
624
            }
625
        }
626
    }
627
}
628

    
629
/* flush all the translation blocks */
630
/* XXX: tb_flush is currently not thread safe */
631
void tb_flush(CPUState *env1)
632
{
633
    CPUState *env;
634
#if defined(DEBUG_FLUSH)
635
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
636
           (unsigned long)(code_gen_ptr - code_gen_buffer),
637
           nb_tbs, nb_tbs > 0 ?
638
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
639
#endif
640
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
641
        cpu_abort(env1, "Internal error: code buffer overflow\n");
642

    
643
    nb_tbs = 0;
644

    
645
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
646
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
647
    }
648

    
649
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
650
    page_flush_tb();
651

    
652
    code_gen_ptr = code_gen_buffer;
653
    /* XXX: flush processor icache at this point if cache flush is
654
       expensive */
655
    tb_flush_count++;
656
}
657

    
658
#ifdef DEBUG_TB_CHECK
659

    
660
static void tb_invalidate_check(target_ulong address)
661
{
662
    TranslationBlock *tb;
663
    int i;
664
    address &= TARGET_PAGE_MASK;
665
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
666
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
667
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
668
                  address >= tb->pc + tb->size)) {
669
                printf("ERROR invalidate: address=" TARGET_FMT_lx
670
                       " PC=%08lx size=%04x\n",
671
                       address, (long)tb->pc, tb->size);
672
            }
673
        }
674
    }
675
}
676

    
677
/* verify that all the pages have correct rights for code */
678
static void tb_page_check(void)
679
{
680
    TranslationBlock *tb;
681
    int i, flags1, flags2;
682

    
683
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
684
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
685
            flags1 = page_get_flags(tb->pc);
686
            flags2 = page_get_flags(tb->pc + tb->size - 1);
687
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
688
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
689
                       (long)tb->pc, tb->size, flags1, flags2);
690
            }
691
        }
692
    }
693
}
694

    
695
#endif
696

    
697
/* invalidate one TB */
698
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
699
                             int next_offset)
700
{
701
    TranslationBlock *tb1;
702
    for(;;) {
703
        tb1 = *ptb;
704
        if (tb1 == tb) {
705
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
706
            break;
707
        }
708
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
709
    }
710
}
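
/* TB list pointers carry a tag in their two low bits: in page_next[]
   and first_tb it is the index (0 or 1) of the page slot inside the
   pointed-to TB, and in the circular jump list the value 2 marks the
   list head (jmp_first).  The helpers below mask with ~3 to recover
   the real pointer. */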

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}
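
/* Self-modifying code support: once a page has taken
   SMC_BITMAP_USE_THRESHOLD write faults, build_page_bitmap() records,
   one bit per byte, which bytes of the page are covered by translated
   code, so tb_invalidate_phys_page_fast() can skip invalidation for
   writes that do not touch any TB. */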
819

    
820
static inline void set_bits(uint8_t *tab, int start, int len)
821
{
822
    int end, mask, end1;
823

    
824
    end = start + len;
825
    tab += start >> 3;
826
    mask = 0xff << (start & 7);
827
    if ((start & ~7) == (end & ~7)) {
828
        if (start < end) {
829
            mask &= ~(0xff << (end & 7));
830
            *tab |= mask;
831
        }
832
    } else {
833
        *tab++ |= mask;
834
        start = (start + 8) & ~7;
835
        end1 = end & ~7;
836
        while (start < end1) {
837
            *tab++ = 0xff;
838
            start += 8;
839
        }
840
        if (start < end) {
841
            mask = ~(0xff << (end & 7));
842
            *tab |= mask;
843
        }
844
    }
845
}
846

    
847
static void build_page_bitmap(PageDesc *p)
848
{
849
    int n, tb_start, tb_end;
850
    TranslationBlock *tb;
851

    
852
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
853

    
854
    tb = p->first_tb;
855
    while (tb != NULL) {
856
        n = (long)tb & 3;
857
        tb = (TranslationBlock *)((long)tb & ~3);
858
        /* NOTE: this is subtle as a TB may span two physical pages */
859
        if (n == 0) {
860
            /* NOTE: tb_end may be after the end of the page, but
861
               it is not a problem */
862
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
863
            tb_end = tb_start + tb->size;
864
            if (tb_end > TARGET_PAGE_SIZE)
865
                tb_end = TARGET_PAGE_SIZE;
866
        } else {
867
            tb_start = 0;
868
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
869
        }
870
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
871
        tb = tb->page_next[n];
872
    }
873
}
874

    
875
TranslationBlock *tb_gen_code(CPUState *env,
876
                              target_ulong pc, target_ulong cs_base,
877
                              int flags, int cflags)
878
{
879
    TranslationBlock *tb;
880
    uint8_t *tc_ptr;
881
    target_ulong phys_pc, phys_page2, virt_page2;
882
    int code_gen_size;
883

    
884
    phys_pc = get_phys_addr_code(env, pc);
885
    tb = tb_alloc(pc);
886
    if (!tb) {
887
        /* flush must be done */
888
        tb_flush(env);
889
        /* cannot fail at this point */
890
        tb = tb_alloc(pc);
891
        /* Don't forget to invalidate previous TB info.  */
892
        tb_invalidated_flag = 1;
893
    }
894
    tc_ptr = code_gen_ptr;
895
    tb->tc_ptr = tc_ptr;
896
    tb->cs_base = cs_base;
897
    tb->flags = flags;
898
    tb->cflags = cflags;
899
    cpu_gen_code(env, tb, &code_gen_size);
900
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
901

    
902
    /* check next page if needed */
903
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
904
    phys_page2 = -1;
905
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
906
        phys_page2 = get_phys_addr_code(env, virt_page2);
907
    }
908
    tb_link_phys(tb, phys_pc, phys_page2);
909
    return tb;
910
}
911

    
912
/* invalidate all TBs which intersect with the target physical page
913
   starting in range [start;end[. NOTE: start and end must refer to
914
   the same physical page. 'is_cpu_write_access' should be true if called
915
   from a real cpu write access: the virtual CPU will exit the current
916
   TB if code is modified inside this TB. */
917
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
918
                                   int is_cpu_write_access)
919
{
920
    TranslationBlock *tb, *tb_next, *saved_tb;
921
    CPUState *env = cpu_single_env;
922
    target_ulong tb_start, tb_end;
923
    PageDesc *p;
924
    int n;
925
#ifdef TARGET_HAS_PRECISE_SMC
926
    int current_tb_not_found = is_cpu_write_access;
927
    TranslationBlock *current_tb = NULL;
928
    int current_tb_modified = 0;
929
    target_ulong current_pc = 0;
930
    target_ulong current_cs_base = 0;
931
    int current_flags = 0;
932
#endif /* TARGET_HAS_PRECISE_SMC */
933

    
934
    p = page_find(start >> TARGET_PAGE_BITS);
935
    if (!p)
936
        return;
937
    if (!p->code_bitmap &&
938
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
939
        is_cpu_write_access) {
940
        /* build code bitmap */
941
        build_page_bitmap(p);
942
    }
943

    
944
    /* we remove all the TBs in the range [start, end[ */
945
    /* XXX: see if in some cases it could be faster to invalidate all the code */
946
    tb = p->first_tb;
947
    while (tb != NULL) {
948
        n = (long)tb & 3;
949
        tb = (TranslationBlock *)((long)tb & ~3);
950
        tb_next = tb->page_next[n];
951
        /* NOTE: this is subtle as a TB may span two physical pages */
952
        if (n == 0) {
953
            /* NOTE: tb_end may be after the end of the page, but
954
               it is not a problem */
955
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
956
            tb_end = tb_start + tb->size;
957
        } else {
958
            tb_start = tb->page_addr[1];
959
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
960
        }
961
        if (!(tb_end <= start || tb_start >= end)) {
962
#ifdef TARGET_HAS_PRECISE_SMC
963
            if (current_tb_not_found) {
964
                current_tb_not_found = 0;
965
                current_tb = NULL;
966
                if (env->mem_io_pc) {
967
                    /* now we have a real cpu fault */
968
                    current_tb = tb_find_pc(env->mem_io_pc);
969
                }
970
            }
971
            if (current_tb == tb &&
972
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
973
                /* If we are modifying the current TB, we must stop
974
                its execution. We could be more precise by checking
975
                that the modification is after the current PC, but it
976
                would require a specialized function to partially
977
                restore the CPU state */
978

    
979
                current_tb_modified = 1;
980
                cpu_restore_state(current_tb, env,
981
                                  env->mem_io_pc, NULL);
982
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
983
                                     &current_flags);
984
            }
985
#endif /* TARGET_HAS_PRECISE_SMC */
986
            /* we need to do that to handle the case where a signal
987
               occurs while doing tb_phys_invalidate() */
988
            saved_tb = NULL;
989
            if (env) {
990
                saved_tb = env->current_tb;
991
                env->current_tb = NULL;
992
            }
993
            tb_phys_invalidate(tb, -1);
994
            if (env) {
995
                env->current_tb = saved_tb;
996
                if (env->interrupt_request && env->current_tb)
997
                    cpu_interrupt(env, env->interrupt_request);
998
            }
999
        }
1000
        tb = tb_next;
1001
    }
1002
#if !defined(CONFIG_USER_ONLY)
1003
    /* if no code remaining, no need to continue to use slow writes */
1004
    if (!p->first_tb) {
1005
        invalidate_page_bitmap(p);
1006
        if (is_cpu_write_access) {
1007
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1008
        }
1009
    }
1010
#endif
1011
#ifdef TARGET_HAS_PRECISE_SMC
1012
    if (current_tb_modified) {
1013
        /* we generate a block containing just the instruction
1014
           modifying the memory. It will ensure that it cannot modify
1015
           itself */
1016
        env->current_tb = NULL;
1017
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1018
        cpu_resume_from_signal(env, NULL);
1019
    }
1020
#endif
1021
}
1022

    
1023
/* len must be <= 8 and start must be a multiple of len */
1024
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1025
{
1026
    PageDesc *p;
1027
    int offset, b;
1028
#if 0
1029
    if (1) {
1030
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1031
                  cpu_single_env->mem_io_vaddr, len,
1032
                  cpu_single_env->eip,
1033
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1034
    }
1035
#endif
1036
    p = page_find(start >> TARGET_PAGE_BITS);
1037
    if (!p)
1038
        return;
1039
    if (p->code_bitmap) {
1040
        offset = start & ~TARGET_PAGE_MASK;
1041
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1042
        if (b & ((1 << len) - 1))
1043
            goto do_invalidate;
1044
    } else {
1045
    do_invalidate:
1046
        tb_invalidate_phys_page_range(start, start + len, 1);
1047
    }
1048
}
1049

    
1050
#if !defined(CONFIG_SOFTMMU)
1051
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1052
                                    unsigned long pc, void *puc)
1053
{
1054
    TranslationBlock *tb;
1055
    PageDesc *p;
1056
    int n;
1057
#ifdef TARGET_HAS_PRECISE_SMC
1058
    TranslationBlock *current_tb = NULL;
1059
    CPUState *env = cpu_single_env;
1060
    int current_tb_modified = 0;
1061
    target_ulong current_pc = 0;
1062
    target_ulong current_cs_base = 0;
1063
    int current_flags = 0;
1064
#endif
1065

    
1066
    addr &= TARGET_PAGE_MASK;
1067
    p = page_find(addr >> TARGET_PAGE_BITS);
1068
    if (!p)
1069
        return;
1070
    tb = p->first_tb;
1071
#ifdef TARGET_HAS_PRECISE_SMC
1072
    if (tb && pc != 0) {
1073
        current_tb = tb_find_pc(pc);
1074
    }
1075
#endif
1076
    while (tb != NULL) {
1077
        n = (long)tb & 3;
1078
        tb = (TranslationBlock *)((long)tb & ~3);
1079
#ifdef TARGET_HAS_PRECISE_SMC
1080
        if (current_tb == tb &&
1081
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1082
                /* If we are modifying the current TB, we must stop
1083
                   its execution. We could be more precise by checking
1084
                   that the modification is after the current PC, but it
1085
                   would require a specialized function to partially
1086
                   restore the CPU state */
1087

    
1088
            current_tb_modified = 1;
1089
            cpu_restore_state(current_tb, env, pc, puc);
1090
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1091
                                 &current_flags);
1092
        }
1093
#endif /* TARGET_HAS_PRECISE_SMC */
1094
        tb_phys_invalidate(tb, addr);
1095
        tb = tb->page_next[n];
1096
    }
1097
    p->first_tb = NULL;
1098
#ifdef TARGET_HAS_PRECISE_SMC
1099
    if (current_tb_modified) {
1100
        /* we generate a block containing just the instruction
1101
           modifying the memory. It will ensure that it cannot modify
1102
           itself */
1103
        env->current_tb = NULL;
1104
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1105
        cpu_resume_from_signal(env, puc);
1106
    }
1107
#endif
1108
}
1109
#endif
1110

    
1111
/* add the tb in the target page and protect it if necessary */
1112
static inline void tb_alloc_page(TranslationBlock *tb,
1113
                                 unsigned int n, target_ulong page_addr)
1114
{
1115
    PageDesc *p;
1116
    TranslationBlock *last_first_tb;
1117

    
1118
    tb->page_addr[n] = page_addr;
1119
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1120
    tb->page_next[n] = p->first_tb;
1121
    last_first_tb = p->first_tb;
1122
    p->first_tb = (TranslationBlock *)((long)tb | n);
1123
    invalidate_page_bitmap(p);
1124

    
1125
#if defined(TARGET_HAS_SMC) || 1
1126

    
1127
#if defined(CONFIG_USER_ONLY)
1128
    if (p->flags & PAGE_WRITE) {
1129
        target_ulong addr;
1130
        PageDesc *p2;
1131
        int prot;
1132

    
1133
        /* force the host page as non writable (writes will have a
1134
           page fault + mprotect overhead) */
1135
        page_addr &= qemu_host_page_mask;
1136
        prot = 0;
1137
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1138
            addr += TARGET_PAGE_SIZE) {
1139

    
1140
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1141
            if (!p2)
1142
                continue;
1143
            prot |= p2->flags;
1144
            p2->flags &= ~PAGE_WRITE;
1145
            page_get_flags(addr);
1146
          }
1147
        mprotect(g2h(page_addr), qemu_host_page_size,
1148
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1149
#ifdef DEBUG_TB_INVALIDATE
1150
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1151
               page_addr);
1152
#endif
1153
    }
1154
#else
1155
    /* if some code is already present, then the pages are already
1156
       protected. So we handle the case where only the first TB is
1157
       allocated in a physical page */
1158
    if (!last_first_tb) {
1159
        tlb_protect_code(page_addr);
1160
    }
1161
#endif
1162

    
1163
#endif /* TARGET_HAS_SMC */
1164
}
1165

    
1166
/* Allocate a new translation block. Flush the translation buffer if
1167
   too many translation blocks or too much generated code. */
1168
TranslationBlock *tb_alloc(target_ulong pc)
1169
{
1170
    TranslationBlock *tb;
1171

    
1172
    if (nb_tbs >= code_gen_max_blocks ||
1173
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1174
        return NULL;
1175
    tb = &tbs[nb_tbs++];
1176
    tb->pc = pc;
1177
    tb->cflags = 0;
1178
    return tb;
1179
}
1180

    
1181
void tb_free(TranslationBlock *tb)
1182
{
1183
    /* In practice this is mostly used for single use temporary TB
1184
       Ignore the hard cases and just back up if this TB happens to
1185
       be the last one generated.  */
1186
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1187
        code_gen_ptr = tb->tc_ptr;
1188
        nb_tbs--;
1189
    }
1190
}
1191

    
1192
/* add a new TB and link it to the physical page tables. phys_page2 is
1193
   (-1) to indicate that only one page contains the TB. */
1194
void tb_link_phys(TranslationBlock *tb,
1195
                  target_ulong phys_pc, target_ulong phys_page2)
1196
{
1197
    unsigned int h;
1198
    TranslationBlock **ptb;
1199

    
1200
    /* Grab the mmap lock to stop another thread invalidating this TB
1201
       before we are done.  */
1202
    mmap_lock();
1203
    /* add in the physical hash table */
1204
    h = tb_phys_hash_func(phys_pc);
1205
    ptb = &tb_phys_hash[h];
1206
    tb->phys_hash_next = *ptb;
1207
    *ptb = tb;
1208

    
1209
    /* add in the page list */
1210
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1211
    if (phys_page2 != -1)
1212
        tb_alloc_page(tb, 1, phys_page2);
1213
    else
1214
        tb->page_addr[1] = -1;
1215

    
1216
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1217
    tb->jmp_next[0] = NULL;
1218
    tb->jmp_next[1] = NULL;
1219

    
1220
    /* init original jump addresses */
1221
    if (tb->tb_next_offset[0] != 0xffff)
1222
        tb_reset_jump(tb, 0);
1223
    if (tb->tb_next_offset[1] != 0xffff)
1224
        tb_reset_jump(tb, 1);
1225

    
1226
#ifdef DEBUG_TB_CHECK
1227
    tb_page_check();
1228
#endif
1229
    mmap_unlock();
1230
}
1231

    
1232
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1233
   tb[1].tc_ptr. Return NULL if not found */
1234
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1235
{
1236
    int m_min, m_max, m;
1237
    unsigned long v;
1238
    TranslationBlock *tb;
1239

    
1240
    if (nb_tbs <= 0)
1241
        return NULL;
1242
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1243
        tc_ptr >= (unsigned long)code_gen_ptr)
1244
        return NULL;
1245
    /* binary search (cf Knuth) */
1246
    m_min = 0;
1247
    m_max = nb_tbs - 1;
1248
    while (m_min <= m_max) {
1249
        m = (m_min + m_max) >> 1;
1250
        tb = &tbs[m];
1251
        v = (unsigned long)tb->tc_ptr;
1252
        if (v == tc_ptr)
1253
            return tb;
1254
        else if (tc_ptr < v) {
1255
            m_max = m - 1;
1256
        } else {
1257
            m_min = m + 1;
1258
        }
1259
    }
1260
    return &tbs[m_max];
1261
}
1262

    
1263
static void tb_reset_jump_recursive(TranslationBlock *tb);
1264

    
1265
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1266
{
1267
    TranslationBlock *tb1, *tb_next, **ptb;
1268
    unsigned int n1;
1269

    
1270
    tb1 = tb->jmp_next[n];
1271
    if (tb1 != NULL) {
1272
        /* find head of list */
1273
        for(;;) {
1274
            n1 = (long)tb1 & 3;
1275
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1276
            if (n1 == 2)
1277
                break;
1278
            tb1 = tb1->jmp_next[n1];
1279
        }
1280
        /* we are now sure now that tb jumps to tb1 */
1281
        tb_next = tb1;
1282

    
1283
        /* remove tb from the jmp_first list */
1284
        ptb = &tb_next->jmp_first;
1285
        for(;;) {
1286
            tb1 = *ptb;
1287
            n1 = (long)tb1 & 3;
1288
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1289
            if (n1 == n && tb1 == tb)
1290
                break;
1291
            ptb = &tb1->jmp_next[n1];
1292
        }
1293
        *ptb = tb->jmp_next[n];
1294
        tb->jmp_next[n] = NULL;
1295

    
1296
        /* suppress the jump to next tb in generated code */
1297
        tb_reset_jump(tb, n);
1298

    
1299
        /* suppress jumps in the tb on which we could have jumped */
1300
        tb_reset_jump_recursive(tb_next);
1301
    }
1302
}
1303

    
1304
static void tb_reset_jump_recursive(TranslationBlock *tb)
1305
{
1306
    tb_reset_jump_recursive2(tb, 0);
1307
    tb_reset_jump_recursive2(tb, 1);
1308
}
1309

    
1310
#if defined(TARGET_HAS_ICE)
1311
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1312
{
1313
    target_phys_addr_t addr;
1314
    target_ulong pd;
1315
    ram_addr_t ram_addr;
1316
    PhysPageDesc *p;
1317

    
1318
    addr = cpu_get_phys_page_debug(env, pc);
1319
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1320
    if (!p) {
1321
        pd = IO_MEM_UNASSIGNED;
1322
    } else {
1323
        pd = p->phys_offset;
1324
    }
1325
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1326
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1327
}
1328
#endif
1329

    
1330
/* Add a watchpoint.  */
1331
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1332
                          int flags, CPUWatchpoint **watchpoint)
1333
{
1334
    target_ulong len_mask = ~(len - 1);
1335
    CPUWatchpoint *wp;
1336

    
1337
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1338
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1339
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1340
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1341
        return -EINVAL;
1342
    }
1343
    wp = qemu_malloc(sizeof(*wp));
1344

    
1345
    wp->vaddr = addr;
1346
    wp->len_mask = len_mask;
1347
    wp->flags = flags;
1348

    
1349
    /* keep all GDB-injected watchpoints in front */
1350
    if (flags & BP_GDB)
1351
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1352
    else
1353
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1354

    
1355
    tlb_flush_page(env, addr);
1356

    
1357
    if (watchpoint)
1358
        *watchpoint = wp;
1359
    return 0;
1360
}
1361

    
1362
/* Remove a specific watchpoint.  */
1363
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1364
                          int flags)
1365
{
1366
    target_ulong len_mask = ~(len - 1);
1367
    CPUWatchpoint *wp;
1368

    
1369
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1370
        if (addr == wp->vaddr && len_mask == wp->len_mask
1371
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1372
            cpu_watchpoint_remove_by_ref(env, wp);
1373
            return 0;
1374
        }
1375
    }
1376
    return -ENOENT;
1377
}
1378

    
1379
/* Remove a specific watchpoint by reference.  */
1380
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1381
{
1382
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1383

    
1384
    tlb_flush_page(env, watchpoint->vaddr);
1385

    
1386
    qemu_free(watchpoint);
1387
}
1388

    
1389
/* Remove all matching watchpoints.  */
1390
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1391
{
1392
    CPUWatchpoint *wp, *next;
1393

    
1394
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1395
        if (wp->flags & mask)
1396
            cpu_watchpoint_remove_by_ref(env, wp);
1397
    }
1398
}
1399

    
1400
/* Add a breakpoint.  */
1401
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1402
                          CPUBreakpoint **breakpoint)
1403
{
1404
#if defined(TARGET_HAS_ICE)
1405
    CPUBreakpoint *bp;
1406

    
1407
    bp = qemu_malloc(sizeof(*bp));
1408

    
1409
    bp->pc = pc;
1410
    bp->flags = flags;
1411

    
1412
    /* keep all GDB-injected breakpoints in front */
1413
    if (flags & BP_GDB)
1414
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1415
    else
1416
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1417

    
1418
    breakpoint_invalidate(env, pc);
1419

    
1420
    if (breakpoint)
1421
        *breakpoint = bp;
1422
    return 0;
1423
#else
1424
    return -ENOSYS;
1425
#endif
1426
}
1427

    
1428
/* Remove a specific breakpoint.  */
1429
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1430
{
1431
#if defined(TARGET_HAS_ICE)
1432
    CPUBreakpoint *bp;
1433

    
1434
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1435
        if (bp->pc == pc && bp->flags == flags) {
1436
            cpu_breakpoint_remove_by_ref(env, bp);
1437
            return 0;
1438
        }
1439
    }
1440
    return -ENOENT;
1441
#else
1442
    return -ENOSYS;
1443
#endif
1444
}
1445

    
1446
/* Remove a specific breakpoint by reference.  */
1447
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1448
{
1449
#if defined(TARGET_HAS_ICE)
1450
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1451

    
1452
    breakpoint_invalidate(env, breakpoint->pc);
1453

    
1454
    qemu_free(breakpoint);
1455
#endif
1456
}
1457

    
1458
/* Remove all matching breakpoints. */
1459
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1460
{
1461
#if defined(TARGET_HAS_ICE)
1462
    CPUBreakpoint *bp, *next;
1463

    
1464
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1465
        if (bp->flags & mask)
1466
            cpu_breakpoint_remove_by_ref(env, bp);
1467
    }
1468
#endif
1469
}
1470

    
1471
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1472
   CPU loop after each instruction */
1473
void cpu_single_step(CPUState *env, int enabled)
1474
{
1475
#if defined(TARGET_HAS_ICE)
1476
    if (env->singlestep_enabled != enabled) {
1477
        env->singlestep_enabled = enabled;
1478
        if (kvm_enabled())
1479
            kvm_update_guest_debug(env, 0);
1480
        else {
1481
            /* must flush all the translated code to avoid inconsistencies */
1482
            /* XXX: only flush what is necessary */
1483
            tb_flush(env);
1484
        }
1485
    }
1486
#endif
1487
}
1488

    
1489
/* enable or disable low levels log */
1490
void cpu_set_log(int log_flags)
1491
{
1492
    loglevel = log_flags;
1493
    if (loglevel && !logfile) {
1494
        logfile = fopen(logfilename, log_append ? "a" : "w");
1495
        if (!logfile) {
1496
            perror(logfilename);
1497
            _exit(1);
1498
        }
1499
#if !defined(CONFIG_SOFTMMU)
1500
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1501
        {
1502
            static char logfile_buf[4096];
1503
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1504
        }
1505
#elif !defined(_WIN32)
1506
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1507
        setvbuf(logfile, NULL, _IOLBF, 0);
1508
#endif
1509
        log_append = 1;
1510
    }
1511
    if (!loglevel && logfile) {
1512
        fclose(logfile);
1513
        logfile = NULL;
1514
    }
1515
}
1516

    
1517
void cpu_set_log_filename(const char *filename)
1518
{
1519
    logfilename = strdup(filename);
1520
    if (logfile) {
1521
        fclose(logfile);
1522
        logfile = NULL;
1523
    }
1524
    cpu_set_log(loglevel);
1525
}
1526

    
1527
static void cpu_unlink_tb(CPUState *env)
1528
{
1529
#if defined(CONFIG_USE_NPTL)
1530
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1531
       problem and hope the cpu will stop of its own accord.  For userspace
1532
       emulation this often isn't actually as bad as it sounds.  Often
1533
       signals are used primarily to interrupt blocking syscalls.  */
1534
#else
1535
    TranslationBlock *tb;
1536
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1537

    
1538
    tb = env->current_tb;
1539
    /* if the cpu is currently executing code, we must unlink it and
1540
       all the potentially executing TB */
1541
    if (tb && !testandset(&interrupt_lock)) {
1542
        env->current_tb = NULL;
1543
        tb_reset_jump_recursive(tb);
1544
        resetlock(&interrupt_lock);
1545
    }
1546
#endif
1547
}
1548

    
1549
/* mask must never be zero, except for A20 change call */
1550
void cpu_interrupt(CPUState *env, int mask)
1551
{
1552
    int old_mask;
1553

    
1554
    old_mask = env->interrupt_request;
1555
    env->interrupt_request |= mask;
1556

    
1557
#ifndef CONFIG_USER_ONLY
1558
    /*
1559
     * If called from iothread context, wake the target cpu in
1560
     * case its halted.
1561
     */
1562
    if (!qemu_cpu_self(env)) {
1563
        qemu_cpu_kick(env);
1564
        return;
1565
    }
1566
#endif
1567

    
1568
    if (use_icount) {
1569
        env->icount_decr.u16.high = 0xffff;
1570
#ifndef CONFIG_USER_ONLY
1571
        if (!can_do_io(env)
1572
            && (mask & ~old_mask) != 0) {
1573
            cpu_abort(env, "Raised interrupt while not in I/O function");
1574
        }
1575
#endif
1576
    } else {
1577
        cpu_unlink_tb(env);
1578
    }
1579
}
1580

    
1581
void cpu_reset_interrupt(CPUState *env, int mask)
1582
{
1583
    env->interrupt_request &= ~mask;
1584
}
1585

    
1586
void cpu_exit(CPUState *env)
1587
{
1588
    env->exit_request = 1;
1589
    cpu_unlink_tb(env);
1590
}
1591

    
1592
const CPULogItem cpu_log_items[] = {
1593
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1594
      "show generated host assembly code for each compiled TB" },
1595
    { CPU_LOG_TB_IN_ASM, "in_asm",
1596
      "show target assembly code for each compiled TB" },
1597
    { CPU_LOG_TB_OP, "op",
1598
      "show micro ops for each compiled TB" },
1599
    { CPU_LOG_TB_OP_OPT, "op_opt",
1600
      "show micro ops "
1601
#ifdef TARGET_I386
1602
      "before eflags optimization and "
1603
#endif
1604
      "after liveness analysis" },
1605
    { CPU_LOG_INT, "int",
1606
      "show interrupts/exceptions in short format" },
1607
    { CPU_LOG_EXEC, "exec",
1608
      "show trace before each executed TB (lots of logs)" },
1609
    { CPU_LOG_TB_CPU, "cpu",
1610
      "show CPU state before block translation" },
1611
#ifdef TARGET_I386
1612
    { CPU_LOG_PCALL, "pcall",
1613
      "show protected mode far calls/returns/exceptions" },
1614
    { CPU_LOG_RESET, "cpu_reset",
1615
      "show CPU state before CPU resets" },
1616
#endif
1617
#ifdef DEBUG_IOPORT
1618
    { CPU_LOG_IOPORT, "ioport",
1619
      "show all i/o ports accesses" },
1620
#endif
1621
    { 0, NULL, NULL },
1622
};
1623

    
1624
static int cmp1(const char *s1, int n, const char *s2)
1625
{
1626
    if (strlen(s2) != n)
1627
        return 0;
1628
    return memcmp(s1, s2, n) == 0;
1629
}
1630

    
1631
/* takes a comma separated list of log masks. Return 0 if error. */
1632
int cpu_str_to_log_mask(const char *str)
1633
{
1634
    const CPULogItem *item;
1635
    int mask;
1636
    const char *p, *p1;
1637

    
1638
    p = str;
1639
    mask = 0;
1640
    for(;;) {
1641
        p1 = strchr(p, ',');
1642
        if (!p1)
1643
            p1 = p + strlen(p);
1644
        if(cmp1(p,p1-p,"all")) {
1645
                for(item = cpu_log_items; item->mask != 0; item++) {
1646
                        mask |= item->mask;
1647
                }
1648
        } else {
1649
        for(item = cpu_log_items; item->mask != 0; item++) {
1650
            if (cmp1(p, p1 - p, item->name))
1651
                goto found;
1652
        }
1653
        return 0;
1654
        }
1655
    found:
1656
        mask |= item->mask;
1657
        if (*p1 != ',')
1658
            break;
1659
        p = p1 + 1;
1660
    }
1661
    return mask;
1662
}
1663

    
1664
void cpu_abort(CPUState *env, const char *fmt, ...)
1665
{
1666
    va_list ap;
1667
    va_list ap2;
1668

    
1669
    va_start(ap, fmt);
1670
    va_copy(ap2, ap);
1671
    fprintf(stderr, "qemu: fatal: ");
1672
    vfprintf(stderr, fmt, ap);
1673
    fprintf(stderr, "\n");
1674
#ifdef TARGET_I386
1675
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1676
#else
1677
    cpu_dump_state(env, stderr, fprintf, 0);
1678
#endif
1679
    if (qemu_log_enabled()) {
1680
        qemu_log("qemu: fatal: ");
1681
        qemu_log_vprintf(fmt, ap2);
1682
        qemu_log("\n");
1683
#ifdef TARGET_I386
1684
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1685
#else
1686
        log_cpu_state(env, 0);
1687
#endif
1688
        qemu_log_flush();
1689
        qemu_log_close();
1690
    }
1691
    va_end(ap2);
1692
    va_end(ap);
1693
    abort();
1694
}
1695

    
1696
CPUState *cpu_copy(CPUState *env)
1697
{
1698
    CPUState *new_env = cpu_init(env->cpu_model_str);
1699
    CPUState *next_cpu = new_env->next_cpu;
1700
    int cpu_index = new_env->cpu_index;
1701
#if defined(TARGET_HAS_ICE)
1702
    CPUBreakpoint *bp;
1703
    CPUWatchpoint *wp;
1704
#endif
1705

    
1706
    memcpy(new_env, env, sizeof(CPUState));
1707

    
1708
    /* Preserve chaining and index. */
1709
    new_env->next_cpu = next_cpu;
1710
    new_env->cpu_index = cpu_index;
1711

    
1712
    /* Clone all break/watchpoints.
1713
       Note: Once we support ptrace with hw-debug register access, make sure
1714
       BP_CPU break/watchpoints are handled correctly on clone. */
1715
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
1717
#if defined(TARGET_HAS_ICE)
1718
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1719
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1720
    }
1721
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1722
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1723
                              wp->flags, NULL);
1724
    }
1725
#endif
1726

    
1727
    return new_env;
1728
}
1729

    
1730
#if !defined(CONFIG_USER_ONLY)
1731

    
1732
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1733
{
1734
    unsigned int i;
1735

    
1736
    /* Discard jump cache entries for any tb which might potentially
1737
       overlap the flushed page.  */
1738
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1739
    memset (&env->tb_jmp_cache[i], 0, 
1740
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1741

    
1742
    i = tb_jmp_cache_hash_page(addr);
1743
    memset (&env->tb_jmp_cache[i], 0, 
1744
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1745
}
1746

    
1747
static CPUTLBEntry s_cputlb_empty_entry = {
1748
    .addr_read  = -1,
1749
    .addr_write = -1,
1750
    .addr_code  = -1,
1751
    .addend     = -1,
1752
};
1753

    
1754
/* NOTE: if flush_global is true, also flush global entries (not
1755
   implemented yet) */
1756
void tlb_flush(CPUState *env, int flush_global)
1757
{
1758
    int i;
1759

    
1760
#if defined(DEBUG_TLB)
1761
    printf("tlb_flush:\n");
1762
#endif
1763
    /* must reset current TB so that interrupts cannot modify the
1764
       links while we are modifying them */
1765
    env->current_tb = NULL;
1766

    
1767
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1768
        int mmu_idx;
1769
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1770
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1771
        }
1772
    }
1773

    
1774
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1775

    
1776
    tlb_flush_count++;
1777
}
1778

    
1779
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1780
{
1781
    if (addr == (tlb_entry->addr_read &
1782
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1783
        addr == (tlb_entry->addr_write &
1784
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1785
        addr == (tlb_entry->addr_code &
1786
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1787
        *tlb_entry = s_cputlb_empty_entry;
1788
    }
1789
}
1790

    
1791
void tlb_flush_page(CPUState *env, target_ulong addr)
1792
{
1793
    int i;
1794
    int mmu_idx;
1795

    
1796
#if defined(DEBUG_TLB)
1797
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1798
#endif
1799
    /* must reset current TB so that interrupts cannot modify the
1800
       links while we are modifying them */
1801
    env->current_tb = NULL;
1802

    
1803
    addr &= TARGET_PAGE_MASK;
1804
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1805
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1806
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1807

    
1808
    tlb_flush_jmp_cache(env, addr);
1809
}
1810

    
1811
/* update the TLBs so that writes to code in the virtual page 'addr'
1812
   can be detected */
1813
static void tlb_protect_code(ram_addr_t ram_addr)
1814
{
1815
    cpu_physical_memory_reset_dirty(ram_addr,
1816
                                    ram_addr + TARGET_PAGE_SIZE,
1817
                                    CODE_DIRTY_FLAG);
1818
}
1819

    
1820
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1821
   tested for self modifying code */
1822
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1823
                                    target_ulong vaddr)
1824
{
1825
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1826
}
1827

    
1828
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1829
                                         unsigned long start, unsigned long length)
1830
{
1831
    unsigned long addr;
1832
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1833
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1834
        if ((addr - start) < length) {
1835
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1836
        }
1837
    }
1838
}
1839

    
1840
/* Note: start and end must be within the same ram block.  */
1841
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1842
                                     int dirty_flags)
1843
{
1844
    CPUState *env;
1845
    unsigned long length, start1;
1846
    int i, mask, len;
1847
    uint8_t *p;
1848

    
1849
    start &= TARGET_PAGE_MASK;
1850
    end = TARGET_PAGE_ALIGN(end);
1851

    
1852
    length = end - start;
1853
    if (length == 0)
1854
        return;
1855
    len = length >> TARGET_PAGE_BITS;
1856
    mask = ~dirty_flags;
1857
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1858
    for(i = 0; i < len; i++)
1859
        p[i] &= mask;
1860

    
1861
    /* we modify the TLB cache so that the dirty bit will be set again
1862
       when accessing the range */
1863
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1864
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1866
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1867
            != (end - 1) - start) {
1868
        abort();
1869
    }
1870

    
1871
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1872
        int mmu_idx;
1873
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1874
            for(i = 0; i < CPU_TLB_SIZE; i++)
1875
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1876
                                      start1, length);
1877
        }
1878
    }
1879
}
1880

    
1881
int cpu_physical_memory_set_dirty_tracking(int enable)
1882
{
1883
    in_migration = enable;
1884
    if (kvm_enabled()) {
1885
        return kvm_set_migration_log(enable);
1886
    }
1887
    return 0;
1888
}
1889

    
1890
int cpu_physical_memory_get_dirty_tracking(void)
1891
{
1892
    return in_migration;
1893
}
1894

    
1895
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1896
                                   target_phys_addr_t end_addr)
1897
{
1898
    int ret = 0;
1899

    
1900
    if (kvm_enabled())
1901
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1902
    return ret;
1903
}
1904

    
1905
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1906
{
1907
    ram_addr_t ram_addr;
1908
    void *p;
1909

    
1910
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1911
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1912
            + tlb_entry->addend);
1913
        ram_addr = qemu_ram_addr_from_host(p);
1914
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1915
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1916
        }
1917
    }
1918
}
1919

    
1920
/* update the TLB according to the current state of the dirty bits */
1921
void cpu_tlb_update_dirty(CPUState *env)
1922
{
1923
    int i;
1924
    int mmu_idx;
1925
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1926
        for(i = 0; i < CPU_TLB_SIZE; i++)
1927
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1928
    }
1929
}
1930

    
1931
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1932
{
1933
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1934
        tlb_entry->addr_write = vaddr;
1935
}
1936

    
1937
/* update the TLB corresponding to virtual page vaddr
1938
   so that it is no longer dirty */
1939
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1940
{
1941
    int i;
1942
    int mmu_idx;
1943

    
1944
    vaddr &= TARGET_PAGE_MASK;
1945
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1946
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1947
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1948
}
1949

    
1950
/* add a new TLB entry. At most one entry for a given virtual address
1951
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1952
   (can only happen in non-SOFTMMU mode for I/O pages or pages
1953
   conflicting with the host address space). */
1954
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1955
                      target_phys_addr_t paddr, int prot,
1956
                      int mmu_idx, int is_softmmu)
1957
{
1958
    PhysPageDesc *p;
1959
    unsigned long pd;
1960
    unsigned int index;
1961
    target_ulong address;
1962
    target_ulong code_address;
1963
    target_phys_addr_t addend;
1964
    int ret;
1965
    CPUTLBEntry *te;
1966
    CPUWatchpoint *wp;
1967
    target_phys_addr_t iotlb;
1968

    
1969
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1970
    if (!p) {
1971
        pd = IO_MEM_UNASSIGNED;
1972
    } else {
1973
        pd = p->phys_offset;
1974
    }
1975
#if defined(DEBUG_TLB)
1976
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1977
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1978
#endif
1979

    
1980
    ret = 0;
1981
    address = vaddr;
1982
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1983
        /* IO memory case (romd handled later) */
1984
        address |= TLB_MMIO;
1985
    }
1986
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1987
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1988
        /* Normal RAM.  */
1989
        iotlb = pd & TARGET_PAGE_MASK;
1990
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1991
            iotlb |= IO_MEM_NOTDIRTY;
1992
        else
1993
            iotlb |= IO_MEM_ROM;
1994
    } else {
1995
        /* IO handlers are currently passed a physical address.
1996
           It would be nice to pass an offset from the base address
1997
           of that region.  This would avoid having to special case RAM,
1998
           and avoid full address decoding in every device.
1999
           We can't use the high bits of pd for this because
2000
           IO_MEM_ROMD uses these as a ram address.  */
2001
        iotlb = (pd & ~TARGET_PAGE_MASK);
2002
        if (p) {
2003
            iotlb += p->region_offset;
2004
        } else {
2005
            iotlb += paddr;
2006
        }
2007
    }
2008

    
2009
    code_address = address;
2010
    /* Make accesses to pages with watchpoints go via the
2011
       watchpoint trap routines.  */
2012
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2013
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2014
            iotlb = io_mem_watch + paddr;
2015
            /* TODO: The memory case can be optimized by not trapping
2016
               reads of pages with a write breakpoint.  */
2017
            address |= TLB_MMIO;
2018
        }
2019
    }
2020

    
2021
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2022
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2023
    te = &env->tlb_table[mmu_idx][index];
2024
    te->addend = addend - vaddr;
2025
    if (prot & PAGE_READ) {
2026
        te->addr_read = address;
2027
    } else {
2028
        te->addr_read = -1;
2029
    }
2030

    
2031
    if (prot & PAGE_EXEC) {
2032
        te->addr_code = code_address;
2033
    } else {
2034
        te->addr_code = -1;
2035
    }
2036
    if (prot & PAGE_WRITE) {
2037
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2038
            (pd & IO_MEM_ROMD)) {
2039
            /* Write access calls the I/O callback.  */
2040
            te->addr_write = address | TLB_MMIO;
2041
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2042
                   !cpu_physical_memory_is_dirty(pd)) {
2043
            te->addr_write = address | TLB_NOTDIRTY;
2044
        } else {
2045
            te->addr_write = address;
2046
        }
2047
    } else {
2048
        te->addr_write = -1;
2049
    }
2050
    return ret;
2051
}
2052

    
2053
#else
2054

    
2055
void tlb_flush(CPUState *env, int flush_global)
2056
{
2057
}
2058

    
2059
void tlb_flush_page(CPUState *env, target_ulong addr)
2060
{
2061
}
2062

    
2063
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2064
                      target_phys_addr_t paddr, int prot,
2065
                      int mmu_idx, int is_softmmu)
2066
{
2067
    return 0;
2068
}
2069

    
2070
/*
2071
 * Walks guest process memory "regions" one by one
2072
 * and calls callback function 'fn' for each region.
2073
 */
2074
int walk_memory_regions(void *priv,
2075
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2076
{
2077
    unsigned long start, end;
2078
    PageDesc *p = NULL;
2079
    int i, j, prot, prot1;
2080
    int rc = 0;
2081

    
2082
    start = end = -1;
2083
    prot = 0;
2084

    
2085
    for (i = 0; i <= L1_SIZE; i++) {
2086
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2087
        for (j = 0; j < L2_SIZE; j++) {
2088
            prot1 = (p == NULL) ? 0 : p[j].flags;
2089
            /*
2090
             * "region" is one continuous chunk of memory
2091
             * that has the same protection flags set.
2092
             */
2093
            if (prot1 != prot) {
2094
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2095
                if (start != -1) {
2096
                    rc = (*fn)(priv, start, end, prot);
2097
                    /* callback can stop iteration by returning != 0 */
2098
                    if (rc != 0)
2099
                        return (rc);
2100
                }
2101
                if (prot1 != 0)
2102
                    start = end;
2103
                else
2104
                    start = -1;
2105
                prot = prot1;
2106
            }
2107
            if (p == NULL)
2108
                break;
2109
        }
2110
    }
2111
    return (rc);
2112
}
2113

    
2114
static int dump_region(void *priv, unsigned long start,
2115
    unsigned long end, unsigned long prot)
2116
{
2117
    FILE *f = (FILE *)priv;
2118

    
2119
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2120
        start, end, end - start,
2121
        ((prot & PAGE_READ) ? 'r' : '-'),
2122
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2123
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2124

    
2125
    return (0);
2126
}
2127

    
2128
/* dump memory mappings */
2129
void page_dump(FILE *f)
2130
{
2131
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2132
            "start", "end", "size", "prot");
2133
    walk_memory_regions(f, dump_region);
2134
}
2135

    
2136
int page_get_flags(target_ulong address)
2137
{
2138
    PageDesc *p;
2139

    
2140
    p = page_find(address >> TARGET_PAGE_BITS);
2141
    if (!p)
2142
        return 0;
2143
    return p->flags;
2144
}
2145

    
2146
/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is set automatically
   depending on PAGE_WRITE */
2149
void page_set_flags(target_ulong start, target_ulong end, int flags)
2150
{
2151
    PageDesc *p;
2152
    target_ulong addr;
2153

    
2154
    /* mmap_lock should already be held.  */
2155
    start = start & TARGET_PAGE_MASK;
2156
    end = TARGET_PAGE_ALIGN(end);
2157
    if (flags & PAGE_WRITE)
2158
        flags |= PAGE_WRITE_ORG;
2159
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2160
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2161
        /* We may be called for host regions that are outside guest
2162
           address space.  */
2163
        if (!p)
2164
            return;
2165
        /* if the write protection is set, then we invalidate the code
2166
           inside */
2167
        if (!(p->flags & PAGE_WRITE) &&
2168
            (flags & PAGE_WRITE) &&
2169
            p->first_tb) {
2170
            tb_invalidate_phys_page(addr, 0, NULL);
2171
        }
2172
        p->flags = flags;
2173
    }
2174
}
2175

    
2176
int page_check_range(target_ulong start, target_ulong len, int flags)
2177
{
2178
    PageDesc *p;
2179
    target_ulong end;
2180
    target_ulong addr;
2181

    
2182
    if (start + len < start)
2183
        /* we've wrapped around */
2184
        return -1;
2185

    
2186
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2187
    start = start & TARGET_PAGE_MASK;
2188

    
2189
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2190
        p = page_find(addr >> TARGET_PAGE_BITS);
2191
        if( !p )
2192
            return -1;
2193
        if( !(p->flags & PAGE_VALID) )
2194
            return -1;
2195

    
2196
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2197
            return -1;
2198
        if (flags & PAGE_WRITE) {
2199
            if (!(p->flags & PAGE_WRITE_ORG))
2200
                return -1;
2201
            /* unprotect the page if it was put read-only because it
2202
               contains translated code */
2203
            if (!(p->flags & PAGE_WRITE)) {
2204
                if (!page_unprotect(addr, 0, NULL))
2205
                    return -1;
2206
            }
2207
            return 0;
2208
        }
2209
    }
2210
    return 0;
2211
}
2212

    
2213
/* called from signal handler: invalidate the code and unprotect the
2214
   page. Return TRUE if the fault was successfully handled. */
2215
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2216
{
2217
    unsigned int page_index, prot, pindex;
2218
    PageDesc *p, *p1;
2219
    target_ulong host_start, host_end, addr;
2220

    
2221
    /* Technically this isn't safe inside a signal handler.  However we
2222
       know this only ever happens in a synchronous SEGV handler, so in
2223
       practice it seems to be ok.  */
2224
    mmap_lock();
2225

    
2226
    host_start = address & qemu_host_page_mask;
2227
    page_index = host_start >> TARGET_PAGE_BITS;
2228
    p1 = page_find(page_index);
2229
    if (!p1) {
2230
        mmap_unlock();
2231
        return 0;
2232
    }
2233
    host_end = host_start + qemu_host_page_size;
2234
    p = p1;
2235
    prot = 0;
2236
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2237
        prot |= p->flags;
2238
        p++;
2239
    }
2240
    /* if the page was really writable, then we change its
2241
       protection back to writable */
2242
    if (prot & PAGE_WRITE_ORG) {
2243
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2244
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2245
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2246
                     (prot & PAGE_BITS) | PAGE_WRITE);
2247
            p1[pindex].flags |= PAGE_WRITE;
2248
            /* and since the content will be modified, we must invalidate
2249
               the corresponding translated code. */
2250
            tb_invalidate_phys_page(address, pc, puc);
2251
#ifdef DEBUG_TB_CHECK
2252
            tb_invalidate_check(address);
2253
#endif
2254
            mmap_unlock();
2255
            return 1;
2256
        }
2257
    }
2258
    mmap_unlock();
2259
    return 0;
2260
}
2261

    
2262
static inline void tlb_set_dirty(CPUState *env,
2263
                                 unsigned long addr, target_ulong vaddr)
2264
{
2265
}
2266
#endif /* defined(CONFIG_USER_ONLY) */
2267

    
2268
#if !defined(CONFIG_USER_ONLY)
2269

    
2270
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2271
                             ram_addr_t memory, ram_addr_t region_offset);
2272
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2273
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2274
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2275
                      need_subpage)                                     \
2276
    do {                                                                \
2277
        if (addr > start_addr)                                          \
2278
            start_addr2 = 0;                                            \
2279
        else {                                                          \
2280
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2281
            if (start_addr2 > 0)                                        \
2282
                need_subpage = 1;                                       \
2283
        }                                                               \
2284
                                                                        \
2285
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2286
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2287
        else {                                                          \
2288
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2289
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2290
                need_subpage = 1;                                       \
2291
        }                                                               \
2292
    } while (0)
2293

    
2294
/* register physical memory.
2295
   For RAM, 'size' must be a multiple of the target page size.
2296
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2297
   io memory page.  The address used when calling the IO function is
2298
   the offset from the start of the region, plus region_offset.  Both
2299
   start_addr and region_offset are rounded down to a page boundary
2300
   before calculating this offset.  This should not be a problem unless
2301
   the low bits of start_addr and region_offset differ.  */
2302
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2303
                                         ram_addr_t size,
2304
                                         ram_addr_t phys_offset,
2305
                                         ram_addr_t region_offset)
2306
{
2307
    target_phys_addr_t addr, end_addr;
2308
    PhysPageDesc *p;
2309
    CPUState *env;
2310
    ram_addr_t orig_size = size;
2311
    void *subpage;
2312

    
2313
    if (kvm_enabled())
2314
        kvm_set_phys_mem(start_addr, size, phys_offset);
2315

    
2316
    if (phys_offset == IO_MEM_UNASSIGNED) {
2317
        region_offset = start_addr;
2318
    }
2319
    region_offset &= TARGET_PAGE_MASK;
2320
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2321
    end_addr = start_addr + (target_phys_addr_t)size;
2322
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2323
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2324
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2325
            ram_addr_t orig_memory = p->phys_offset;
2326
            target_phys_addr_t start_addr2, end_addr2;
2327
            int need_subpage = 0;
2328

    
2329
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2330
                          need_subpage);
2331
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2332
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2333
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2334
                                           &p->phys_offset, orig_memory,
2335
                                           p->region_offset);
2336
                } else {
2337
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2338
                                            >> IO_MEM_SHIFT];
2339
                }
2340
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2341
                                 region_offset);
2342
                p->region_offset = 0;
2343
            } else {
2344
                p->phys_offset = phys_offset;
2345
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2346
                    (phys_offset & IO_MEM_ROMD))
2347
                    phys_offset += TARGET_PAGE_SIZE;
2348
            }
2349
        } else {
2350
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2351
            p->phys_offset = phys_offset;
2352
            p->region_offset = region_offset;
2353
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2354
                (phys_offset & IO_MEM_ROMD)) {
2355
                phys_offset += TARGET_PAGE_SIZE;
2356
            } else {
2357
                target_phys_addr_t start_addr2, end_addr2;
2358
                int need_subpage = 0;
2359

    
2360
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2361
                              end_addr2, need_subpage);
2362

    
2363
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2364
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2365
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2366
                                           addr & TARGET_PAGE_MASK);
2367
                    subpage_register(subpage, start_addr2, end_addr2,
2368
                                     phys_offset, region_offset);
2369
                    p->region_offset = 0;
2370
                }
2371
            }
2372
        }
2373
        region_offset += TARGET_PAGE_SIZE;
2374
    }
2375

    
2376
    /* since each CPU stores ram addresses in its TLB cache, we must
2377
       reset the modified entries */
2378
    /* XXX: slow ! */
2379
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2380
        tlb_flush(env, 1);
2381
    }
2382
}
2383

    
2384
/* XXX: temporary until new memory mapping API */
2385
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2386
{
2387
    PhysPageDesc *p;
2388

    
2389
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2390
    if (!p)
2391
        return IO_MEM_UNASSIGNED;
2392
    return p->phys_offset;
2393
}
2394

    
2395
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2396
{
2397
    if (kvm_enabled())
2398
        kvm_coalesce_mmio_region(addr, size);
2399
}
2400

    
2401
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2402
{
2403
    if (kvm_enabled())
2404
        kvm_uncoalesce_mmio_region(addr, size);
2405
}
2406

    
2407
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2408
{
2409
    RAMBlock *new_block;
2410

    
2411
    size = TARGET_PAGE_ALIGN(size);
2412
    new_block = qemu_malloc(sizeof(*new_block));
2413

    
2414
    new_block->host = qemu_vmalloc(size);
2415
    new_block->offset = last_ram_offset;
2416
    new_block->length = size;
2417

    
2418
    new_block->next = ram_blocks;
2419
    ram_blocks = new_block;
2420

    
2421
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2422
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2423
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2424
           0xff, size >> TARGET_PAGE_BITS);
2425

    
2426
    last_ram_offset += size;
2427

    
2428
    if (kvm_enabled())
2429
        kvm_setup_guest_memory(new_block->host, size);
2430

    
2431
    return new_block->offset;
2432
}
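/* Example (illustrative sketch, not part of the original file): the usual
   pairing of qemu_ram_alloc() with the cpu_register_physical_memory() wrapper
   (assumed, from cpu-common.h, to forward to
   cpu_register_physical_memory_offset() with a zero region_offset).  The size
   and guest-physical base address are made up for the example. */
#if 0
static void example_map_board_ram(void)
{
    ram_addr_t ram_offset;

    /* allocate 16MB of host backing store and expose it at guest address 0 */
    ram_offset = qemu_ram_alloc(16 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 16 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);
}
#endif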

    
2434
void qemu_ram_free(ram_addr_t addr)
2435
{
2436
    /* TODO: implement this.  */
2437
}
2438

    
2439
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2440
   With the exception of the softmmu code in this file, this should
2441
   only be used for local memory (e.g. video ram) that the device owns,
2442
   and knows it isn't going to access beyond the end of the block.
2443

2444
   It should not be used for general purpose DMA.
2445
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2446
 */
2447
void *qemu_get_ram_ptr(ram_addr_t addr)
2448
{
2449
    RAMBlock *prev;
2450
    RAMBlock **prevp;
2451
    RAMBlock *block;
2452

    
2453
    prev = NULL;
2454
    prevp = &ram_blocks;
2455
    block = ram_blocks;
2456
    while (block && (block->offset > addr
2457
                     || block->offset + block->length <= addr)) {
2458
        if (prev)
2459
          prevp = &prev->next;
2460
        prev = block;
2461
        block = block->next;
2462
    }
2463
    if (!block) {
2464
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2465
        abort();
2466
    }
2467
    /* Move this entry to the start of the list.  */
2468
    if (prev) {
2469
        prev->next = block->next;
2470
        block->next = *prevp;
2471
        *prevp = block;
2472
    }
2473
    return block->host + (addr - block->offset);
2474
}
2475

    
2476
/* Some of the softmmu routines need to translate from a host pointer
2477
   (typically a TLB entry) back to a ram offset.  */
2478
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2479
{
2480
    RAMBlock *prev;
2481
    RAMBlock **prevp;
2482
    RAMBlock *block;
2483
    uint8_t *host = ptr;
2484

    
2485
    prev = NULL;
2486
    prevp = &ram_blocks;
2487
    block = ram_blocks;
2488
    while (block && (block->host > host
2489
                     || block->host + block->length <= host)) {
2490
        if (prev)
2491
          prevp = &prev->next;
2492
        prev = block;
2493
        block = block->next;
2494
    }
2495
    if (!block) {
2496
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2497
        abort();
2498
    }
2499
    return block->offset + (host - block->host);
2500
}
2501

    
2502
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2503
{
2504
#ifdef DEBUG_UNASSIGNED
2505
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2506
#endif
2507
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2508
    do_unassigned_access(addr, 0, 0, 0, 1);
2509
#endif
2510
    return 0;
2511
}
2512

    
2513
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2514
{
2515
#ifdef DEBUG_UNASSIGNED
2516
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2517
#endif
2518
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2519
    do_unassigned_access(addr, 0, 0, 0, 2);
2520
#endif
2521
    return 0;
2522
}
2523

    
2524
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2525
{
2526
#ifdef DEBUG_UNASSIGNED
2527
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2528
#endif
2529
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2530
    do_unassigned_access(addr, 0, 0, 0, 4);
2531
#endif
2532
    return 0;
2533
}
2534

    
2535
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2536
{
2537
#ifdef DEBUG_UNASSIGNED
2538
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2539
#endif
2540
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2541
    do_unassigned_access(addr, 1, 0, 0, 1);
2542
#endif
2543
}
2544

    
2545
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2546
{
2547
#ifdef DEBUG_UNASSIGNED
2548
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2549
#endif
2550
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2551
    do_unassigned_access(addr, 1, 0, 0, 2);
2552
#endif
2553
}
2554

    
2555
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2556
{
2557
#ifdef DEBUG_UNASSIGNED
2558
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2559
#endif
2560
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2561
    do_unassigned_access(addr, 1, 0, 0, 4);
2562
#endif
2563
}
2564

    
2565
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2566
    unassigned_mem_readb,
2567
    unassigned_mem_readw,
2568
    unassigned_mem_readl,
2569
};
2570

    
2571
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2572
    unassigned_mem_writeb,
2573
    unassigned_mem_writew,
2574
    unassigned_mem_writel,
2575
};
2576

    
2577
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2578
                                uint32_t val)
2579
{
2580
    int dirty_flags;
2581
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2582
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2583
#if !defined(CONFIG_USER_ONLY)
2584
        tb_invalidate_phys_page_fast(ram_addr, 1);
2585
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2586
#endif
2587
    }
2588
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2589
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2590
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2591
    /* we remove the notdirty callback only if the code has been
2592
       flushed */
2593
    if (dirty_flags == 0xff)
2594
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2595
}
2596

    
2597
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2598
                                uint32_t val)
2599
{
2600
    int dirty_flags;
2601
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2602
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2603
#if !defined(CONFIG_USER_ONLY)
2604
        tb_invalidate_phys_page_fast(ram_addr, 2);
2605
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2606
#endif
2607
    }
2608
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2609
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2610
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2611
    /* we remove the notdirty callback only if the code has been
2612
       flushed */
2613
    if (dirty_flags == 0xff)
2614
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2615
}
2616

    
2617
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2618
                                uint32_t val)
2619
{
2620
    int dirty_flags;
2621
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2622
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2623
#if !defined(CONFIG_USER_ONLY)
2624
        tb_invalidate_phys_page_fast(ram_addr, 4);
2625
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2626
#endif
2627
    }
2628
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2629
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2630
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2631
    /* we remove the notdirty callback only if the code has been
2632
       flushed */
2633
    if (dirty_flags == 0xff)
2634
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2635
}
2636

    
2637
static CPUReadMemoryFunc * const error_mem_read[3] = {
2638
    NULL, /* never used */
2639
    NULL, /* never used */
2640
    NULL, /* never used */
2641
};
2642

    
2643
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2644
    notdirty_mem_writeb,
2645
    notdirty_mem_writew,
2646
    notdirty_mem_writel,
2647
};
2648

    
2649
/* Generate a debug exception if a watchpoint has been hit.  */
2650
static void check_watchpoint(int offset, int len_mask, int flags)
2651
{
2652
    CPUState *env = cpu_single_env;
2653
    target_ulong pc, cs_base;
2654
    TranslationBlock *tb;
2655
    target_ulong vaddr;
2656
    CPUWatchpoint *wp;
2657
    int cpu_flags;
2658

    
2659
    if (env->watchpoint_hit) {
2660
        /* We re-entered the check after replacing the TB. Now raise
2661
         * the debug interrupt so that it will trigger after the
2662
         * current instruction. */
2663
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2664
        return;
2665
    }
2666
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2667
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2668
        if ((vaddr == (wp->vaddr & len_mask) ||
2669
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2670
            wp->flags |= BP_WATCHPOINT_HIT;
2671
            if (!env->watchpoint_hit) {
2672
                env->watchpoint_hit = wp;
2673
                tb = tb_find_pc(env->mem_io_pc);
2674
                if (!tb) {
2675
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2676
                              "pc=%p", (void *)env->mem_io_pc);
2677
                }
2678
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2679
                tb_phys_invalidate(tb, -1);
2680
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2681
                    env->exception_index = EXCP_DEBUG;
2682
                } else {
2683
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2684
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2685
                }
2686
                cpu_resume_from_signal(env, NULL);
2687
            }
2688
        } else {
2689
            wp->flags &= ~BP_WATCHPOINT_HIT;
2690
        }
2691
    }
2692
}
2693

    
2694
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2695
   so these check for a hit then pass through to the normal out-of-line
2696
   phys routines.  */
2697
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2698
{
2699
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2700
    return ldub_phys(addr);
2701
}
2702

    
2703
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2704
{
2705
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2706
    return lduw_phys(addr);
2707
}
2708

    
2709
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2710
{
2711
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2712
    return ldl_phys(addr);
2713
}
2714

    
2715
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2716
                             uint32_t val)
2717
{
2718
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2719
    stb_phys(addr, val);
2720
}
2721

    
2722
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2723
                             uint32_t val)
2724
{
2725
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2726
    stw_phys(addr, val);
2727
}
2728

    
2729
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2730
                             uint32_t val)
2731
{
2732
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2733
    stl_phys(addr, val);
2734
}
2735

    
2736
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2737
    watch_mem_readb,
2738
    watch_mem_readw,
2739
    watch_mem_readl,
2740
};
2741

    
2742
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2743
    watch_mem_writeb,
2744
    watch_mem_writew,
2745
    watch_mem_writel,
2746
};
2747

    
2748
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2749
                                 unsigned int len)
2750
{
2751
    uint32_t ret;
2752
    unsigned int idx;
2753

    
2754
    idx = SUBPAGE_IDX(addr);
2755
#if defined(DEBUG_SUBPAGE)
2756
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2757
           mmio, len, addr, idx);
2758
#endif
2759
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2760
                                       addr + mmio->region_offset[idx][0][len]);
2761

    
2762
    return ret;
2763
}
2764

    
2765
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2766
                              uint32_t value, unsigned int len)
2767
{
2768
    unsigned int idx;
2769

    
2770
    idx = SUBPAGE_IDX(addr);
2771
#if defined(DEBUG_SUBPAGE)
2772
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2773
           mmio, len, addr, idx, value);
2774
#endif
2775
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2776
                                  addr + mmio->region_offset[idx][1][len],
2777
                                  value);
2778
}
2779

    
2780
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2781
{
2782
#if defined(DEBUG_SUBPAGE)
2783
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2784
#endif
2785

    
2786
    return subpage_readlen(opaque, addr, 0);
2787
}
2788

    
2789
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2790
                            uint32_t value)
2791
{
2792
#if defined(DEBUG_SUBPAGE)
2793
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2794
#endif
2795
    subpage_writelen(opaque, addr, value, 0);
2796
}
2797

    
2798
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2799
{
2800
#if defined(DEBUG_SUBPAGE)
2801
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2802
#endif
2803

    
2804
    return subpage_readlen(opaque, addr, 1);
2805
}
2806

    
2807
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2808
                            uint32_t value)
2809
{
2810
#if defined(DEBUG_SUBPAGE)
2811
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2812
#endif
2813
    subpage_writelen(opaque, addr, value, 1);
2814
}
2815

    
2816
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2817
{
2818
#if defined(DEBUG_SUBPAGE)
2819
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2820
#endif
2821

    
2822
    return subpage_readlen(opaque, addr, 2);
2823
}
2824

    
2825
static void subpage_writel (void *opaque,
2826
                         target_phys_addr_t addr, uint32_t value)
2827
{
2828
#if defined(DEBUG_SUBPAGE)
2829
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2830
#endif
2831
    subpage_writelen(opaque, addr, value, 2);
2832
}
2833

    
2834
static CPUReadMemoryFunc * const subpage_read[] = {
2835
    &subpage_readb,
2836
    &subpage_readw,
2837
    &subpage_readl,
2838
};
2839

    
2840
static CPUWriteMemoryFunc * const subpage_write[] = {
2841
    &subpage_writeb,
2842
    &subpage_writew,
2843
    &subpage_writel,
2844
};
2845

    
2846
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2847
                             ram_addr_t memory, ram_addr_t region_offset)
2848
{
2849
    int idx, eidx;
2850
    unsigned int i;
2851

    
2852
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2853
        return -1;
2854
    idx = SUBPAGE_IDX(start);
2855
    eidx = SUBPAGE_IDX(end);
2856
#if defined(DEBUG_SUBPAGE)
2857
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2858
           mmio, start, end, idx, eidx, memory);
2859
#endif
2860
    memory >>= IO_MEM_SHIFT;
2861
    for (; idx <= eidx; idx++) {
2862
        for (i = 0; i < 4; i++) {
2863
            if (io_mem_read[memory][i]) {
2864
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2865
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2866
                mmio->region_offset[idx][0][i] = region_offset;
2867
            }
2868
            if (io_mem_write[memory][i]) {
2869
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2870
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2871
                mmio->region_offset[idx][1][i] = region_offset;
2872
            }
2873
        }
2874
    }
2875

    
2876
    return 0;
2877
}
2878

    
2879
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2880
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2881
{
2882
    subpage_t *mmio;
2883
    int subpage_memory;
2884

    
2885
    mmio = qemu_mallocz(sizeof(subpage_t));
2886

    
2887
    mmio->base = base;
2888
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2889
#if defined(DEBUG_SUBPAGE)
2890
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2891
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2892
#endif
2893
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2894
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2895
                         region_offset);
2896

    
2897
    return mmio;
2898
}
2899

    
2900
static int get_free_io_mem_idx(void)
2901
{
2902
    int i;
2903

    
2904
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2905
        if (!io_mem_used[i]) {
2906
            io_mem_used[i] = 1;
2907
            return i;
2908
        }
2909

    
2910
    return -1;
2911
}
2912

    
2913
/* mem_read and mem_write are arrays of functions containing the
2914
   function to access byte (index 0), word (index 1) and dword (index
2915
   2). Functions can be omitted with a NULL function pointer.
2916
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
2920
static int cpu_register_io_memory_fixed(int io_index,
2921
                                        CPUReadMemoryFunc * const *mem_read,
2922
                                        CPUWriteMemoryFunc * const *mem_write,
2923
                                        void *opaque)
2924
{
2925
    int i, subwidth = 0;
2926

    
2927
    if (io_index <= 0) {
2928
        io_index = get_free_io_mem_idx();
2929
        if (io_index == -1)
2930
            return io_index;
2931
    } else {
2932
        io_index >>= IO_MEM_SHIFT;
2933
        if (io_index >= IO_MEM_NB_ENTRIES)
2934
            return -1;
2935
    }
2936

    
2937
    for(i = 0;i < 3; i++) {
2938
        if (!mem_read[i] || !mem_write[i])
2939
            subwidth = IO_MEM_SUBWIDTH;
2940
        io_mem_read[io_index][i] = mem_read[i];
2941
        io_mem_write[io_index][i] = mem_write[i];
2942
    }
2943
    io_mem_opaque[io_index] = opaque;
2944
    return (io_index << IO_MEM_SHIFT) | subwidth;
2945
}
2946

    
2947
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2948
                           CPUWriteMemoryFunc * const *mem_write,
2949
                           void *opaque)
2950
{
2951
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2952
}
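/* Example (illustrative sketch, not part of the original file): a hypothetical
   device registering MMIO callbacks with cpu_register_io_memory() and mapping
   them with the cpu_register_physical_memory() wrapper from cpu-common.h.
   All names, the base address and the region size are invented; since only
   32-bit handlers are supplied, the region comes back flagged as "subwidth"
   and is dispatched through the subpage machinery above. */
#if 0
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    /* a real device would decode 'addr' against its register layout */
    return 0;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* a real device would latch 'val' into the register at 'addr' */
}

static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL, NULL, example_dev_readl,      /* byte, word, dword accessors */
};

static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_init(void *dev_state)
{
    int io_index = cpu_register_io_memory(example_dev_read,
                                          example_dev_write, dev_state);
    cpu_register_physical_memory(0x10000000, 0x1000, io_index);
}
#endif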

    
2954
void cpu_unregister_io_memory(int io_table_address)
2955
{
2956
    int i;
2957
    int io_index = io_table_address >> IO_MEM_SHIFT;
2958

    
2959
    for (i=0;i < 3; i++) {
2960
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2961
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2962
    }
2963
    io_mem_opaque[io_index] = NULL;
2964
    io_mem_used[io_index] = 0;
2965
}
2966

    
2967
static void io_mem_init(void)
2968
{
2969
    int i;
2970

    
2971
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2972
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2973
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2974
    for (i=0; i<5; i++)
2975
        io_mem_used[i] = 1;
2976

    
2977
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
2978
                                          watch_mem_write, NULL);
2979
}
2980

    
2981
#endif /* !defined(CONFIG_USER_ONLY) */
2982

    
2983
/* physical memory access (slow version, mainly for debug) */
2984
#if defined(CONFIG_USER_ONLY)
2985
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2986
                            int len, int is_write)
2987
{
2988
    int l, flags;
2989
    target_ulong page;
2990
    void * p;
2991

    
2992
    while (len > 0) {
2993
        page = addr & TARGET_PAGE_MASK;
2994
        l = (page + TARGET_PAGE_SIZE) - addr;
2995
        if (l > len)
2996
            l = len;
2997
        flags = page_get_flags(page);
2998
        if (!(flags & PAGE_VALID))
2999
            return;
3000
        if (is_write) {
3001
            if (!(flags & PAGE_WRITE))
3002
                return;
3003
            /* XXX: this code should not depend on lock_user */
3004
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3005
                /* FIXME - should this return an error rather than just fail? */
3006
                return;
3007
            memcpy(p, buf, l);
3008
            unlock_user(p, addr, l);
3009
        } else {
3010
            if (!(flags & PAGE_READ))
3011
                return;
3012
            /* XXX: this code should not depend on lock_user */
3013
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3014
                /* FIXME - should this return an error rather than just fail? */
3015
                return;
3016
            memcpy(buf, p, l);
3017
            unlock_user(p, addr, 0);
3018
        }
3019
        len -= l;
3020
        buf += l;
3021
        addr += l;
3022
    }
3023
}
3024

    
3025
#else
3026
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3027
                            int len, int is_write)
3028
{
3029
    int l, io_index;
3030
    uint8_t *ptr;
3031
    uint32_t val;
3032
    target_phys_addr_t page;
3033
    unsigned long pd;
3034
    PhysPageDesc *p;
3035

    
3036
    while (len > 0) {
3037
        page = addr & TARGET_PAGE_MASK;
3038
        l = (page + TARGET_PAGE_SIZE) - addr;
3039
        if (l > len)
3040
            l = len;
3041
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3042
        if (!p) {
3043
            pd = IO_MEM_UNASSIGNED;
3044
        } else {
3045
            pd = p->phys_offset;
3046
        }
3047

    
3048
        if (is_write) {
3049
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3050
                target_phys_addr_t addr1 = addr;
3051
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3052
                if (p)
3053
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3054
                /* XXX: could force cpu_single_env to NULL to avoid
3055
                   potential bugs */
3056
                if (l >= 4 && ((addr1 & 3) == 0)) {
3057
                    /* 32 bit write access */
3058
                    val = ldl_p(buf);
3059
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3060
                    l = 4;
3061
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3062
                    /* 16 bit write access */
3063
                    val = lduw_p(buf);
3064
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3065
                    l = 2;
3066
                } else {
3067
                    /* 8 bit write access */
3068
                    val = ldub_p(buf);
3069
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3070
                    l = 1;
3071
                }
3072
            } else {
3073
                unsigned long addr1;
3074
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3075
                /* RAM case */
3076
                ptr = qemu_get_ram_ptr(addr1);
3077
                memcpy(ptr, buf, l);
3078
                if (!cpu_physical_memory_is_dirty(addr1)) {
3079
                    /* invalidate code */
3080
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3081
                    /* set dirty bit */
3082
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3083
                        (0xff & ~CODE_DIRTY_FLAG);
3084
                }
3085
            }
3086
        } else {
3087
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3088
                !(pd & IO_MEM_ROMD)) {
3089
                target_phys_addr_t addr1 = addr;
3090
                /* I/O case */
3091
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3092
                if (p)
3093
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3094
                if (l >= 4 && ((addr1 & 3) == 0)) {
3095
                    /* 32 bit read access */
3096
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3097
                    stl_p(buf, val);
3098
                    l = 4;
3099
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3100
                    /* 16 bit read access */
3101
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3102
                    stw_p(buf, val);
3103
                    l = 2;
3104
                } else {
3105
                    /* 8 bit read access */
3106
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3107
                    stb_p(buf, val);
3108
                    l = 1;
3109
                }
3110
            } else {
3111
                /* RAM case */
3112
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3113
                    (addr & ~TARGET_PAGE_MASK);
3114
                memcpy(buf, ptr, l);
3115
            }
3116
        }
3117
        len -= l;
3118
        buf += l;
3119
        addr += l;
3120
    }
3121
}
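/* Example (illustrative sketch, not part of the original file): copying a
   small value to and from guest physical memory with
   cpu_physical_memory_rw().  The guest address is an arbitrary example and
   the value is written in host byte order. */
#if 0
static void example_poke_guest(void)
{
    uint32_t magic = 0xdeadbeef;
    uint32_t check = 0;

    cpu_physical_memory_rw(0x1000, (uint8_t *)&magic, sizeof(magic), 1);
    cpu_physical_memory_rw(0x1000, (uint8_t *)&check, sizeof(check), 0);
    /* 'check' now holds 0xdeadbeef, assuming 0x1000 is backed by RAM */
}
#endif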

    
3123
/* used for ROM loading : can write in RAM and ROM */
3124
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3125
                                   const uint8_t *buf, int len)
3126
{
3127
    int l;
3128
    uint8_t *ptr;
3129
    target_phys_addr_t page;
3130
    unsigned long pd;
3131
    PhysPageDesc *p;
3132

    
3133
    while (len > 0) {
3134
        page = addr & TARGET_PAGE_MASK;
3135
        l = (page + TARGET_PAGE_SIZE) - addr;
3136
        if (l > len)
3137
            l = len;
3138
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3139
        if (!p) {
3140
            pd = IO_MEM_UNASSIGNED;
3141
        } else {
3142
            pd = p->phys_offset;
3143
        }
3144

    
3145
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3146
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3147
            !(pd & IO_MEM_ROMD)) {
3148
            /* do nothing */
3149
        } else {
3150
            unsigned long addr1;
3151
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3152
            /* ROM/RAM case */
3153
            ptr = qemu_get_ram_ptr(addr1);
3154
            memcpy(ptr, buf, l);
3155
        }
3156
        len -= l;
3157
        buf += l;
3158
        addr += l;
3159
    }
3160
}
3161

    
3162
typedef struct {
3163
    void *buffer;
3164
    target_phys_addr_t addr;
3165
    target_phys_addr_t len;
3166
} BounceBuffer;
3167

    
3168
static BounceBuffer bounce;
3169

    
3170
typedef struct MapClient {
3171
    void *opaque;
3172
    void (*callback)(void *opaque);
3173
    QLIST_ENTRY(MapClient) link;
3174
} MapClient;
3175

    
3176
static QLIST_HEAD(map_client_list, MapClient) map_client_list
3177
    = QLIST_HEAD_INITIALIZER(map_client_list);
3178

    
3179
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3180
{
3181
    MapClient *client = qemu_malloc(sizeof(*client));
3182

    
3183
    client->opaque = opaque;
3184
    client->callback = callback;
3185
    QLIST_INSERT_HEAD(&map_client_list, client, link);
3186
    return client;
3187
}
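/* Example (illustrative sketch, not part of the original file): deferring a
   DMA transfer until the bounce buffer is free again.  The callback and its
   state argument are invented names; cpu_notify_map_clients() below
   unregisters the client automatically when it fires. */
#if 0
static void example_retry_dma(void *opaque)
{
    /* re-issue the cpu_physical_memory_map() call that previously came up
       short for the transfer described by 'opaque' */
}

static void example_defer_dma(void *dma_state)
{
    cpu_register_map_client(dma_state, example_retry_dma);
}
#endif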

    
3189
void cpu_unregister_map_client(void *_client)
3190
{
3191
    MapClient *client = (MapClient *)_client;
3192

    
3193
    QLIST_REMOVE(client, link);
3194
    qemu_free(client);
3195
}
3196

    
3197
static void cpu_notify_map_clients(void)
3198
{
3199
    MapClient *client;
3200

    
3201
    while (!QLIST_EMPTY(&map_client_list)) {
3202
        client = QLIST_FIRST(&map_client_list);
3203
        client->callback(client->opaque);
3204
        cpu_unregister_map_client(client);
3205
    }
3206
}
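
/* Illustrative sketch (not part of the original file): the retry pattern
 * the map-client list is meant for.  cpu_physical_memory_map() below can
 * return NULL when the single bounce buffer is already in use; the caller
 * then registers a callback and tries again once cpu_notify_map_clients()
 * runs from cpu_physical_memory_unmap().  The ExampleDMAState type and
 * example_* helpers are hypothetical.
 */
#if 0
typedef struct ExampleDMAState {
    target_phys_addr_t addr;
    target_phys_addr_t len;
} ExampleDMAState;

static void example_start_dma(ExampleDMAState *s);

static void example_dma_retry(void *opaque)
{
    /* the MapClient entry is freed by cpu_notify_map_clients() right
       after this callback returns, so just retry the transfer */
    example_start_dma(opaque);
}

static void example_start_dma(ExampleDMAState *s)
{
    target_phys_addr_t plen = s->len;
    void *mem = cpu_physical_memory_map(s->addr, &plen, 1 /* is_write */);

    if (!mem) {
        /* bounce buffer busy: wait until some other mapping is released */
        cpu_register_map_client(s, example_dma_retry);
        return;
    }
    /* ... fill up to plen bytes at mem, then ... */
    cpu_physical_memory_unmap(mem, plen, 1, plen);
}
#endif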

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
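
/* Illustrative sketch (not part of the original file): a read-side
 * map/unmap cycle.  The mapping may cover less than was requested (the
 * updated *plen says how much), and the final argument to
 * cpu_physical_memory_unmap() tells it how many bytes were actually
 * touched.  example_consume() and its caller are hypothetical.
 */
#if 0
static void example_consume(const void *buf, target_phys_addr_t len); /* hypothetical */

static void example_read_guest_buffer(target_phys_addr_t addr,
                                      target_phys_addr_t size)
{
    while (size > 0) {
        target_phys_addr_t plen = size;
        void *mem = cpu_physical_memory_map(addr, &plen, 0 /* read */);

        if (!mem) {
            break;      /* see the map-client retry sketch above */
        }
        example_consume(mem, plen);
        /* is_write == 0: nothing is dirtied, no TBs are invalidated */
        cpu_physical_memory_unmap(mem, plen, 0, plen);
        addr += plen;
        size -= plen;
    }
}
#endif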

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
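
/* Illustrative sketch (not part of the original file): updating a guest
 * page-table entry from MMU emulation code.  Using stl_phys() here would
 * mark the page-table page dirty (and invalidate translated code on it),
 * which defeats using the dirty bits to track PTEs the guest itself
 * modified; stl_phys_notdirty() avoids that.  pte_addr and the bit value
 * are hypothetical.
 */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= 0x20;                        /* e.g. an "accessed"-style bit */
    stl_phys_notdirty(pte_addr, pte);   /* no dirty marking, no TB flush */
}
#endif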
3430

    
3431
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
3432
{
3433
    int io_index;
3434
    uint8_t *ptr;
3435
    unsigned long pd;
3436
    PhysPageDesc *p;
3437

    
3438
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3439
    if (!p) {
3440
        pd = IO_MEM_UNASSIGNED;
3441
    } else {
3442
        pd = p->phys_offset;
3443
    }
3444

    
3445
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3446
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3447
        if (p)
3448
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3449
#ifdef TARGET_WORDS_BIGENDIAN
3450
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
3451
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
3452
#else
3453
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3454
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
3455
#endif
3456
    } else {
3457
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3458
            (addr & ~TARGET_PAGE_MASK);
3459
        stq_p(ptr, val);
3460
    }
3461
}
3462

    
3463
/* warning: addr must be aligned */
3464
void stl_phys(target_phys_addr_t addr, uint32_t val)
3465
{
3466
    int io_index;
3467
    uint8_t *ptr;
3468
    unsigned long pd;
3469
    PhysPageDesc *p;
3470

    
3471
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
3472
    if (!p) {
3473
        pd = IO_MEM_UNASSIGNED;
3474
    } else {
3475
        pd = p->phys_offset;
3476
    }
3477

    
3478
    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3479
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3480
        if (p)
3481
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3482
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
3483
    } else {
3484
        unsigned long addr1;
3485
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3486
        /* RAM case */
3487
        ptr = qemu_get_ram_ptr(addr1);
3488
        stl_p(ptr, val);
3489
        if (!cpu_physical_memory_is_dirty(addr1)) {
3490
            /* invalidate code */
3491
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3492
            /* set dirty bit */
3493
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3494
                (0xff & ~CODE_DIRTY_FLAG);
3495
        }
3496
    }
3497
}
3498

    
3499
/* XXX: optimize */
3500
void stb_phys(target_phys_addr_t addr, uint32_t val)
3501
{
3502
    uint8_t v = val;
3503
    cpu_physical_memory_write(addr, &v, 1);
3504
}
3505

    
3506
/* XXX: optimize */
3507
void stw_phys(target_phys_addr_t addr, uint32_t val)
3508
{
3509
    uint16_t v = tswap16(val);
3510
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
3511
}
3512

    
3513
/* XXX: optimize */
3514
void stq_phys(target_phys_addr_t addr, uint64_t val)
3515
{
3516
    val = tswap64(val);
3517
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
3518
}
3519

    
3520
#endif
3521

    
3522
/* virtual memory access for debug (includes writing to ROM) */
3523
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
3524
                        uint8_t *buf, int len, int is_write)
3525
{
3526
    int l;
3527
    target_phys_addr_t phys_addr;
3528
    target_ulong page;
3529

    
3530
    while (len > 0) {
3531
        page = addr & TARGET_PAGE_MASK;
3532
        phys_addr = cpu_get_phys_page_debug(env, page);
3533
        /* if no physical page mapped, return an error */
3534
        if (phys_addr == -1)
3535
            return -1;
3536
        l = (page + TARGET_PAGE_SIZE) - addr;
3537
        if (l > len)
3538
            l = len;
3539
        phys_addr += (addr & ~TARGET_PAGE_MASK);
3540
#if !defined(CONFIG_USER_ONLY)
3541
        if (is_write)
3542
            cpu_physical_memory_write_rom(phys_addr, buf, l);
3543
        else
3544
#endif
3545
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
3546
        len -= l;
3547
        buf += l;
3548
        addr += l;
3549
    }
3550
    return 0;
3551
}
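
/* Illustrative sketch (not part of the original file): debugger-style
 * access to guest virtual addresses.  cpu_memory_rw_debug() walks the
 * guest page tables via cpu_get_phys_page_debug() and, on the write path,
 * goes through cpu_physical_memory_write_rom() so it also works on
 * ROM/flash pages.  The example_* helpers are hypothetical.
 */
#if 0
static int example_peek_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int size)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, size, 0 /* read */);
}

static int example_poke_guest_byte(CPUState *env, target_ulong vaddr,
                                   uint8_t byte)
{
    return cpu_memory_rw_debug(env, vaddr, &byte, 1, 1 /* write */);
}
#endif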

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
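
/* Illustrative sketch (not part of the original file): dump_exec_info()
 * takes any fprintf-compatible callback, so the TB statistics can go to
 * stderr just as easily as to the monitor.  The helper name is
 * hypothetical.
 */
#if 0
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif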
3664

    
3665
#if !defined(CONFIG_USER_ONLY)
3666

    
3667
#define MMUSUFFIX _cmmu
3668
#define GETPC() NULL
3669
#define env cpu_single_env
3670
#define SOFTMMU_CODE_ACCESS
3671

    
3672
#define SHIFT 0
3673
#include "softmmu_template.h"
3674

    
3675
#define SHIFT 1
3676
#include "softmmu_template.h"
3677

    
3678
#define SHIFT 2
3679
#include "softmmu_template.h"
3680

    
3681
#define SHIFT 3
3682
#include "softmmu_template.h"
3683

    
3684
#undef env
3685

    
3686
#endif