exec.c @ 89b08ae1
1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36
#include "qemu-common.h"
37
#include "tcg.h"
38
#include "hw/hw.h"
39
#include "osdep.h"
40
#include "kvm.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#endif
44

    
45
//#define DEBUG_TB_INVALIDATE
46
//#define DEBUG_FLUSH
47
//#define DEBUG_TLB
48
//#define DEBUG_UNASSIGNED
49

    
50
/* make various TB consistency checks */
51
//#define DEBUG_TB_CHECK
52
//#define DEBUG_TLB_CHECK
53

    
54
//#define DEBUG_IOPORT
55
//#define DEBUG_SUBPAGE
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* TB consistency checks only implemented for usermode emulation.  */
59
#undef DEBUG_TB_CHECK
60
#endif
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#if defined(TARGET_SPARC64)
65
#define TARGET_PHYS_ADDR_SPACE_BITS 41
66
#elif defined(TARGET_SPARC)
67
#define TARGET_PHYS_ADDR_SPACE_BITS 36
68
#elif defined(TARGET_ALPHA)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 42
70
#define TARGET_VIRT_ADDR_SPACE_BITS 42
71
#elif defined(TARGET_PPC64)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#elif defined(TARGET_X86_64)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_I386)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 36
77
#else
78
#define TARGET_PHYS_ADDR_SPACE_BITS 32
79
#endif
80

    
81
static TranslationBlock *tbs;
82
int code_gen_max_blocks;
83
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84
static int nb_tbs;
85
/* any access to the tbs or the page table must use this lock */
86
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87

    
88
#if defined(__arm__) || defined(__sparc_v9__)
89
/* The prologue must be reachable with a direct jump. ARM and Sparc64
90
 have limited branch ranges (possibly also PPC) so place it in a
91
 section close to code segment. */
92
#define code_gen_section                                \
93
    __attribute__((__section__(".gen_code")))           \
94
    __attribute__((aligned (32)))
95
#elif defined(_WIN32)
96
/* Maximum alignment for Win32 is 16. */
97
#define code_gen_section                                \
98
    __attribute__((aligned (16)))
99
#else
100
#define code_gen_section                                \
101
    __attribute__((aligned (32)))
102
#endif
103

    
104
uint8_t code_gen_prologue[1024] code_gen_section;
105
static uint8_t *code_gen_buffer;
106
static unsigned long code_gen_buffer_size;
107
/* threshold to flush the translated code buffer */
108
static unsigned long code_gen_buffer_max_size;
109
uint8_t *code_gen_ptr;
110

    
111
#if !defined(CONFIG_USER_ONLY)
112
int phys_ram_fd;
113
uint8_t *phys_ram_dirty;
114
static int in_migration;
115

    
116
typedef struct RAMBlock {
117
    uint8_t *host;
118
    ram_addr_t offset;
119
    ram_addr_t length;
120
    struct RAMBlock *next;
121
} RAMBlock;
122

    
123
static RAMBlock *ram_blocks;
124
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125
   then we can no longer assume contiguous ram offsets, and external uses
126
   of this variable will break.  */
127
ram_addr_t last_ram_offset;
128
#endif
129

    
130
CPUState *first_cpu;
131
/* current CPU in the current thread. It is only valid inside
132
   cpu_exec() */
133
CPUState *cpu_single_env;
134
/* 0 = Do not count executed instructions.
135
   1 = Precise instruction counting.
136
   2 = Adaptive rate instruction counting.  */
137
int use_icount = 0;
138
/* Current instruction counter.  While executing translated code this may
139
   include some instructions that have not yet been executed.  */
140
int64_t qemu_icount;
141

    
142
typedef struct PageDesc {
143
    /* list of TBs intersecting this ram page */
144
    TranslationBlock *first_tb;
145
    /* in order to optimize self modifying code, we count the number
146
       of lookups we do to a given page to use a bitmap */
147
    unsigned int code_write_count;
148
    uint8_t *code_bitmap;
149
#if defined(CONFIG_USER_ONLY)
150
    unsigned long flags;
151
#endif
152
} PageDesc;
153

    
154
typedef struct PhysPageDesc {
155
    /* offset in host memory of the page + io_index in the low bits */
156
    ram_addr_t phys_offset;
157
    ram_addr_t region_offset;
158
} PhysPageDesc;
159

    
160
#define L2_BITS 10
161
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162
/* XXX: this is a temporary hack for alpha target.
163
 *      In the future, this is to be replaced by a multi-level table
164
 *      to actually be able to handle the complete 64 bits address space.
165
 */
166
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167
#else
168
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169
#endif
170

    
171
#define L1_SIZE (1 << L1_BITS)
172
#define L2_SIZE (1 << L2_BITS)
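/* Note: with e.g. TARGET_PAGE_BITS = 12 and L2_BITS = 10, a 32-bit address
   splits into a 10-bit L1 index, a 10-bit L2 index and a 12-bit page offset
   (10 + 10 + 12 = 32); l1_map[] then holds L1_SIZE pointers, each to an
   array of L2_SIZE PageDesc entries. */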
173

    
174
unsigned long qemu_real_host_page_size;
175
unsigned long qemu_host_page_bits;
176
unsigned long qemu_host_page_size;
177
unsigned long qemu_host_page_mask;
178

    
179
/* XXX: for system emulation, it could just be an array */
180
static PageDesc *l1_map[L1_SIZE];
181
static PhysPageDesc **l1_phys_map;
182

    
183
#if !defined(CONFIG_USER_ONLY)
184
static void io_mem_init(void);
185

    
186
/* io memory support */
187
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
188
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190
static char io_mem_used[IO_MEM_NB_ENTRIES];
191
static int io_mem_watch;
192
#endif
193

    
194
/* log support */
195
static const char *logfilename = "/tmp/qemu.log";
196
FILE *logfile;
197
int loglevel;
198
static int log_append = 0;
199

    
200
/* statistics */
201
static int tlb_flush_count;
202
static int tb_flush_count;
203
static int tb_phys_invalidate_count;
204

    
205
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
206
typedef struct subpage_t {
207
    target_phys_addr_t base;
208
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
209
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
210
    void *opaque[TARGET_PAGE_SIZE][2][4];
211
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
212
} subpage_t;
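/* Note: a subpage_t allows one target page to be split across several memory
   regions; the handler tables above are indexed by SUBPAGE_IDX(addr), i.e.
   the byte offset within the page, plus an access-size index. */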
213

    
214
#ifdef _WIN32
215
static void map_exec(void *addr, long size)
216
{
217
    DWORD old_protect;
218
    VirtualProtect(addr, size,
219
                   PAGE_EXECUTE_READWRITE, &old_protect);
220
    
221
}
222
#else
223
static void map_exec(void *addr, long size)
224
{
225
    unsigned long start, end, page_size;
226
    
227
    page_size = getpagesize();
228
    start = (unsigned long)addr;
229
    start &= ~(page_size - 1);
230
    
231
    end = (unsigned long)addr + size;
232
    end += page_size - 1;
233
    end &= ~(page_size - 1);
234
    
235
    mprotect((void *)start, end - start,
236
             PROT_READ | PROT_WRITE | PROT_EXEC);
237
}
238
#endif
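/* Note: the POSIX map_exec() above rounds [addr, addr + size) outward to host
   page boundaries because mprotect() operates on whole pages only; the Win32
   variant relies on VirtualProtect() applying to every page in the range. */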
239

    
240
static void page_init(void)
241
{
242
    /* NOTE: we can always suppose that qemu_host_page_size >=
243
       TARGET_PAGE_SIZE */
244
#ifdef _WIN32
245
    {
246
        SYSTEM_INFO system_info;
247

    
248
        GetSystemInfo(&system_info);
249
        qemu_real_host_page_size = system_info.dwPageSize;
250
    }
251
#else
252
    qemu_real_host_page_size = getpagesize();
253
#endif
254
    if (qemu_host_page_size == 0)
255
        qemu_host_page_size = qemu_real_host_page_size;
256
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
257
        qemu_host_page_size = TARGET_PAGE_SIZE;
258
    qemu_host_page_bits = 0;
259
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
260
        qemu_host_page_bits++;
261
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
262
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
263
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
264

    
265
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
266
    {
267
        long long startaddr, endaddr;
268
        FILE *f;
269
        int n;
270

    
271
        mmap_lock();
272
        last_brk = (unsigned long)sbrk(0);
273
        f = fopen("/proc/self/maps", "r");
274
        if (f) {
275
            do {
276
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
277
                if (n == 2) {
278
                    startaddr = MIN(startaddr,
279
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280
                    endaddr = MIN(endaddr,
281
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
283
                                   TARGET_PAGE_ALIGN(endaddr),
284
                                   PAGE_RESERVED); 
285
                }
286
            } while (!feof(f));
287
            fclose(f);
288
        }
289
        mmap_unlock();
290
    }
291
#endif
292
}
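/* Note on page_init(): in user-mode emulation the /proc/self/maps scan above
   marks every range already mapped into the host process as PAGE_RESERVED,
   so guest mappings are not placed on top of the emulator's own memory. */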
293

    
294
static inline PageDesc **page_l1_map(target_ulong index)
295
{
296
#if TARGET_LONG_BITS > 32
297
    /* Host memory outside guest VM.  For 32-bit targets we have already
298
       excluded high addresses.  */
299
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
300
        return NULL;
301
#endif
302
    return &l1_map[index >> L2_BITS];
303
}
304

    
305
static inline PageDesc *page_find_alloc(target_ulong index)
306
{
307
    PageDesc **lp, *p;
308
    lp = page_l1_map(index);
309
    if (!lp)
310
        return NULL;
311

    
312
    p = *lp;
313
    if (!p) {
314
        /* allocate if not found */
315
#if defined(CONFIG_USER_ONLY)
316
        size_t len = sizeof(PageDesc) * L2_SIZE;
317
        /* Don't use qemu_malloc because it may recurse.  */
318
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
319
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
320
        *lp = p;
321
        if (h2g_valid(p)) {
322
            unsigned long addr = h2g(p);
323
            page_set_flags(addr & TARGET_PAGE_MASK,
324
                           TARGET_PAGE_ALIGN(addr + len),
325
                           PAGE_RESERVED); 
326
        }
327
#else
328
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
329
        *lp = p;
330
#endif
331
    }
332
    return p + (index & (L2_SIZE - 1));
333
}
334

    
335
static inline PageDesc *page_find(target_ulong index)
336
{
337
    PageDesc **lp, *p;
338
    lp = page_l1_map(index);
339
    if (!lp)
340
        return NULL;
341

    
342
    p = *lp;
343
    if (!p) {
344
        return NULL;
345
    }
346
    return p + (index & (L2_SIZE - 1));
347
}
348

    
349
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
350
{
351
    void **lp, **p;
352
    PhysPageDesc *pd;
353

    
354
    p = (void **)l1_phys_map;
355
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
356

    
357
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
358
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359
#endif
360
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
361
    p = *lp;
362
    if (!p) {
363
        /* allocate if not found */
364
        if (!alloc)
365
            return NULL;
366
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
367
        memset(p, 0, sizeof(void *) * L1_SIZE);
368
        *lp = p;
369
    }
370
#endif
371
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372
    pd = *lp;
373
    if (!pd) {
374
        int i;
375
        /* allocate if not found */
376
        if (!alloc)
377
            return NULL;
378
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
379
        *lp = pd;
380
        for (i = 0; i < L2_SIZE; i++) {
381
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
382
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
383
        }
384
    }
385
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
386
}
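/* Note: phys_page_find_alloc() walks an (up to) two-level table keyed by the
   physical page index: the upper bits select an l1_phys_map entry, the low
   L2_BITS select the PhysPageDesc within the leaf array.  Freshly allocated
   leaves start out as IO_MEM_UNASSIGNED. */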
387

    
388
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
389
{
390
    return phys_page_find_alloc(index, 0);
391
}
392

    
393
#if !defined(CONFIG_USER_ONLY)
394
static void tlb_protect_code(ram_addr_t ram_addr);
395
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
396
                                    target_ulong vaddr);
397
#define mmap_lock() do { } while(0)
398
#define mmap_unlock() do { } while(0)
399
#endif
400

    
401
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
402

    
403
#if defined(CONFIG_USER_ONLY)
404
/* Currently it is not recommended to allocate big chunks of data in
405
   user mode. This will change when a dedicated libc is used */
406
#define USE_STATIC_CODE_GEN_BUFFER
407
#endif
408

    
409
#ifdef USE_STATIC_CODE_GEN_BUFFER
410
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
411
#endif
412

    
413
static void code_gen_alloc(unsigned long tb_size)
414
{
415
#ifdef USE_STATIC_CODE_GEN_BUFFER
416
    code_gen_buffer = static_code_gen_buffer;
417
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
418
    map_exec(code_gen_buffer, code_gen_buffer_size);
419
#else
420
    code_gen_buffer_size = tb_size;
421
    if (code_gen_buffer_size == 0) {
422
#if defined(CONFIG_USER_ONLY)
423
        /* in user mode, phys_ram_size is not meaningful */
424
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
425
#else
426
        /* XXX: needs adjustments */
427
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
428
#endif
429
    }
430
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
431
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
432
    /* The code gen buffer location may have constraints depending on
433
       the host cpu and OS */
434
#if defined(__linux__) 
435
    {
436
        int flags;
437
        void *start = NULL;
438

    
439
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
440
#if defined(__x86_64__)
441
        flags |= MAP_32BIT;
442
        /* Cannot map more than that */
443
        if (code_gen_buffer_size > (800 * 1024 * 1024))
444
            code_gen_buffer_size = (800 * 1024 * 1024);
445
#elif defined(__sparc_v9__)
446
        // Map the buffer below 2G, so we can use direct calls and branches
447
        flags |= MAP_FIXED;
448
        start = (void *) 0x60000000UL;
449
        if (code_gen_buffer_size > (512 * 1024 * 1024))
450
            code_gen_buffer_size = (512 * 1024 * 1024);
451
#elif defined(__arm__)
452
        /* Map the buffer below 32M, so we can use direct calls and branches */
453
        flags |= MAP_FIXED;
454
        start = (void *) 0x01000000UL;
455
        if (code_gen_buffer_size > 16 * 1024 * 1024)
456
            code_gen_buffer_size = 16 * 1024 * 1024;
457
#endif
458
        code_gen_buffer = mmap(start, code_gen_buffer_size,
459
                               PROT_WRITE | PROT_READ | PROT_EXEC,
460
                               flags, -1, 0);
461
        if (code_gen_buffer == MAP_FAILED) {
462
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463
            exit(1);
464
        }
465
    }
466
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
467
    {
468
        int flags;
469
        void *addr = NULL;
470
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
471
#if defined(__x86_64__)
472
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
473
         * 0x40000000 is free */
474
        flags |= MAP_FIXED;
475
        addr = (void *)0x40000000;
476
        /* Cannot map more than that */
477
        if (code_gen_buffer_size > (800 * 1024 * 1024))
478
            code_gen_buffer_size = (800 * 1024 * 1024);
479
#endif
480
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
481
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
482
                               flags, -1, 0);
483
        if (code_gen_buffer == MAP_FAILED) {
484
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485
            exit(1);
486
        }
487
    }
488
#else
489
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
490
    map_exec(code_gen_buffer, code_gen_buffer_size);
491
#endif
492
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
493
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
494
    code_gen_buffer_max_size = code_gen_buffer_size - 
495
        code_gen_max_block_size();
496
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
497
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
498
}
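/* Note: code_gen_buffer_max_size reserves room for one maximally sized block,
   so tb_alloc() can refuse a new TB before cpu_gen_code() could overflow the
   buffer, while code_gen_max_blocks sizes the tbs[] array from the average
   block size. */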
499

    
500
/* Must be called before using the QEMU cpus. 'tb_size' is the size
501
   (in bytes) allocated to the translation buffer. Zero means default
502
   size. */
503
void cpu_exec_init_all(unsigned long tb_size)
504
{
505
    cpu_gen_init();
506
    code_gen_alloc(tb_size);
507
    code_gen_ptr = code_gen_buffer;
508
    page_init();
509
#if !defined(CONFIG_USER_ONLY)
510
    io_mem_init();
511
#endif
512
}
513

    
514
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
515

    
516
static void cpu_common_pre_save(void *opaque)
517
{
518
    CPUState *env = opaque;
519

    
520
    cpu_synchronize_state(env);
521
}
522

    
523
static int cpu_common_pre_load(void *opaque)
524
{
525
    CPUState *env = opaque;
526

    
527
    cpu_synchronize_state(env);
528
    return 0;
529
}
530

    
531
static int cpu_common_post_load(void *opaque, int version_id)
532
{
533
    CPUState *env = opaque;
534

    
535
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
536
       version_id is increased. */
537
    env->interrupt_request &= ~0x01;
538
    tlb_flush(env, 1);
539

    
540
    return 0;
541
}
542

    
543
static const VMStateDescription vmstate_cpu_common = {
544
    .name = "cpu_common",
545
    .version_id = 1,
546
    .minimum_version_id = 1,
547
    .minimum_version_id_old = 1,
548
    .pre_save = cpu_common_pre_save,
549
    .pre_load = cpu_common_pre_load,
550
    .post_load = cpu_common_post_load,
551
    .fields      = (VMStateField []) {
552
        VMSTATE_UINT32(halted, CPUState),
553
        VMSTATE_UINT32(interrupt_request, CPUState),
554
        VMSTATE_END_OF_LIST()
555
    }
556
};
557
#endif
558

    
559
CPUState *qemu_get_cpu(int cpu)
560
{
561
    CPUState *env = first_cpu;
562

    
563
    while (env) {
564
        if (env->cpu_index == cpu)
565
            break;
566
        env = env->next_cpu;
567
    }
568

    
569
    return env;
570
}
571

    
572
void cpu_exec_init(CPUState *env)
573
{
574
    CPUState **penv;
575
    int cpu_index;
576

    
577
#if defined(CONFIG_USER_ONLY)
578
    cpu_list_lock();
579
#endif
580
    env->next_cpu = NULL;
581
    penv = &first_cpu;
582
    cpu_index = 0;
583
    while (*penv != NULL) {
584
        penv = &(*penv)->next_cpu;
585
        cpu_index++;
586
    }
587
    env->cpu_index = cpu_index;
588
    env->numa_node = 0;
589
    QTAILQ_INIT(&env->breakpoints);
590
    QTAILQ_INIT(&env->watchpoints);
591
    *penv = env;
592
#if defined(CONFIG_USER_ONLY)
593
    cpu_list_unlock();
594
#endif
595
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
596
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
597
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
598
                    cpu_save, cpu_load, env);
599
#endif
600
}
601

    
602
static inline void invalidate_page_bitmap(PageDesc *p)
603
{
604
    if (p->code_bitmap) {
605
        qemu_free(p->code_bitmap);
606
        p->code_bitmap = NULL;
607
    }
608
    p->code_write_count = 0;
609
}
610

    
611
/* set to NULL all the 'first_tb' fields in all PageDescs */
612
static void page_flush_tb(void)
613
{
614
    int i, j;
615
    PageDesc *p;
616

    
617
    for(i = 0; i < L1_SIZE; i++) {
618
        p = l1_map[i];
619
        if (p) {
620
            for(j = 0; j < L2_SIZE; j++) {
621
                p->first_tb = NULL;
622
                invalidate_page_bitmap(p);
623
                p++;
624
            }
625
        }
626
    }
627
}
628

    
629
/* flush all the translation blocks */
630
/* XXX: tb_flush is currently not thread safe */
631
void tb_flush(CPUState *env1)
632
{
633
    CPUState *env;
634
#if defined(DEBUG_FLUSH)
635
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
636
           (unsigned long)(code_gen_ptr - code_gen_buffer),
637
           nb_tbs, nb_tbs > 0 ?
638
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
639
#endif
640
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
641
        cpu_abort(env1, "Internal error: code buffer overflow\n");
642

    
643
    nb_tbs = 0;
644

    
645
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
646
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
647
    }
648

    
649
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
650
    page_flush_tb();
651

    
652
    code_gen_ptr = code_gen_buffer;
653
    /* XXX: flush processor icache at this point if cache flush is
654
       expensive */
655
    tb_flush_count++;
656
}
657

    
658
#ifdef DEBUG_TB_CHECK
659

    
660
static void tb_invalidate_check(target_ulong address)
661
{
662
    TranslationBlock *tb;
663
    int i;
664
    address &= TARGET_PAGE_MASK;
665
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
666
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
667
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
668
                  address >= tb->pc + tb->size)) {
669
                printf("ERROR invalidate: address=" TARGET_FMT_lx
670
                       " PC=%08lx size=%04x\n",
671
                       address, (long)tb->pc, tb->size);
672
            }
673
        }
674
    }
675
}
676

    
677
/* verify that all the pages have correct rights for code */
678
static void tb_page_check(void)
679
{
680
    TranslationBlock *tb;
681
    int i, flags1, flags2;
682

    
683
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
684
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
685
            flags1 = page_get_flags(tb->pc);
686
            flags2 = page_get_flags(tb->pc + tb->size - 1);
687
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
688
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
689
                       (long)tb->pc, tb->size, flags1, flags2);
690
            }
691
        }
692
    }
693
}
694

    
695
#endif
696

    
697
/* invalidate one TB */
698
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
699
                             int next_offset)
700
{
701
    TranslationBlock *tb1;
702
    for(;;) {
703
        tb1 = *ptb;
704
        if (tb1 == tb) {
705
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
706
            break;
707
        }
708
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
709
    }
710
}
711

    
712
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
713
{
714
    TranslationBlock *tb1;
715
    unsigned int n1;
716

    
717
    for(;;) {
718
        tb1 = *ptb;
719
        n1 = (long)tb1 & 3;
720
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
721
        if (tb1 == tb) {
722
            *ptb = tb1->page_next[n1];
723
            break;
724
        }
725
        ptb = &tb1->page_next[n1];
726
    }
727
}
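/* Note: pointers in the per-page TB lists carry a tag in their two low bits,
   which is why the code masks with ~3 before dereferencing: the tag (0 or 1)
   records whether the link belongs to page_addr[0] or page_addr[1] of that
   TB, and a tag of 2 marks the head of the jump lists (see jmp_first). */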
728

    
729
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
730
{
731
    TranslationBlock *tb1, **ptb;
732
    unsigned int n1;
733

    
734
    ptb = &tb->jmp_next[n];
735
    tb1 = *ptb;
736
    if (tb1) {
737
        /* find tb(n) in circular list */
738
        for(;;) {
739
            tb1 = *ptb;
740
            n1 = (long)tb1 & 3;
741
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
742
            if (n1 == n && tb1 == tb)
743
                break;
744
            if (n1 == 2) {
745
                ptb = &tb1->jmp_first;
746
            } else {
747
                ptb = &tb1->jmp_next[n1];
748
            }
749
        }
750
        /* now we can suppress tb(n) from the list */
751
        *ptb = tb->jmp_next[n];
752

    
753
        tb->jmp_next[n] = NULL;
754
    }
755
}
756

    
757
/* reset the jump entry 'n' of a TB so that it is not chained to
758
   another TB */
759
static inline void tb_reset_jump(TranslationBlock *tb, int n)
760
{
761
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
762
}
763

    
764
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
765
{
766
    CPUState *env;
767
    PageDesc *p;
768
    unsigned int h, n1;
769
    target_phys_addr_t phys_pc;
770
    TranslationBlock *tb1, *tb2;
771

    
772
    /* remove the TB from the hash list */
773
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
774
    h = tb_phys_hash_func(phys_pc);
775
    tb_remove(&tb_phys_hash[h], tb,
776
              offsetof(TranslationBlock, phys_hash_next));
777

    
778
    /* remove the TB from the page list */
779
    if (tb->page_addr[0] != page_addr) {
780
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
781
        tb_page_remove(&p->first_tb, tb);
782
        invalidate_page_bitmap(p);
783
    }
784
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
785
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
786
        tb_page_remove(&p->first_tb, tb);
787
        invalidate_page_bitmap(p);
788
    }
789

    
790
    tb_invalidated_flag = 1;
791

    
792
    /* remove the TB from the hash list */
793
    h = tb_jmp_cache_hash_func(tb->pc);
794
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
795
        if (env->tb_jmp_cache[h] == tb)
796
            env->tb_jmp_cache[h] = NULL;
797
    }
798

    
799
    /* suppress this TB from the two jump lists */
800
    tb_jmp_remove(tb, 0);
801
    tb_jmp_remove(tb, 1);
802

    
803
    /* suppress any remaining jumps to this TB */
804
    tb1 = tb->jmp_first;
805
    for(;;) {
806
        n1 = (long)tb1 & 3;
807
        if (n1 == 2)
808
            break;
809
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
810
        tb2 = tb1->jmp_next[n1];
811
        tb_reset_jump(tb1, n1);
812
        tb1->jmp_next[n1] = NULL;
813
        tb1 = tb2;
814
    }
815
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
816

    
817
    tb_phys_invalidate_count++;
818
}
819

    
820
static inline void set_bits(uint8_t *tab, int start, int len)
821
{
822
    int end, mask, end1;
823

    
824
    end = start + len;
825
    tab += start >> 3;
826
    mask = 0xff << (start & 7);
827
    if ((start & ~7) == (end & ~7)) {
828
        if (start < end) {
829
            mask &= ~(0xff << (end & 7));
830
            *tab |= mask;
831
        }
832
    } else {
833
        *tab++ |= mask;
834
        start = (start + 8) & ~7;
835
        end1 = end & ~7;
836
        while (start < end1) {
837
            *tab++ = 0xff;
838
            start += 8;
839
        }
840
        if (start < end) {
841
            mask = ~(0xff << (end & 7));
842
            *tab |= mask;
843
        }
844
    }
845
}
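/* Example: set_bits(tab, 3, 7) marks bits 3..9, i.e. it ORs 0xf8 into tab[0]
   and 0x03 into tab[1].  The code bitmap built below uses one bit per byte of
   guest code on the page, hence TARGET_PAGE_SIZE / 8 bytes in total. */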
846

    
847
static void build_page_bitmap(PageDesc *p)
848
{
849
    int n, tb_start, tb_end;
850
    TranslationBlock *tb;
851

    
852
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
853

    
854
    tb = p->first_tb;
855
    while (tb != NULL) {
856
        n = (long)tb & 3;
857
        tb = (TranslationBlock *)((long)tb & ~3);
858
        /* NOTE: this is subtle as a TB may span two physical pages */
859
        if (n == 0) {
860
            /* NOTE: tb_end may be after the end of the page, but
861
               it is not a problem */
862
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
863
            tb_end = tb_start + tb->size;
864
            if (tb_end > TARGET_PAGE_SIZE)
865
                tb_end = TARGET_PAGE_SIZE;
866
        } else {
867
            tb_start = 0;
868
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
869
        }
870
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
871
        tb = tb->page_next[n];
872
    }
873
}
874

    
875
TranslationBlock *tb_gen_code(CPUState *env,
876
                              target_ulong pc, target_ulong cs_base,
877
                              int flags, int cflags)
878
{
879
    TranslationBlock *tb;
880
    uint8_t *tc_ptr;
881
    target_ulong phys_pc, phys_page2, virt_page2;
882
    int code_gen_size;
883

    
884
    phys_pc = get_phys_addr_code(env, pc);
885
    tb = tb_alloc(pc);
886
    if (!tb) {
887
        /* flush must be done */
888
        tb_flush(env);
889
        /* cannot fail at this point */
890
        tb = tb_alloc(pc);
891
        /* Don't forget to invalidate previous TB info.  */
892
        tb_invalidated_flag = 1;
893
    }
894
    tc_ptr = code_gen_ptr;
895
    tb->tc_ptr = tc_ptr;
896
    tb->cs_base = cs_base;
897
    tb->flags = flags;
898
    tb->cflags = cflags;
899
    cpu_gen_code(env, tb, &code_gen_size);
900
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
901

    
902
    /* check next page if needed */
903
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
904
    phys_page2 = -1;
905
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
906
        phys_page2 = get_phys_addr_code(env, virt_page2);
907
    }
908
    tb_link_phys(tb, phys_pc, phys_page2);
909
    return tb;
910
}
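/* Note: the rounding of code_gen_ptr above, (x + CODE_GEN_ALIGN - 1) &
   ~(CODE_GEN_ALIGN - 1), advances the pointer to the next CODE_GEN_ALIGN
   boundary so that each TB's generated code starts suitably aligned. */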
911

    
912
/* invalidate all TBs which intersect with the target physical page
913
   starting in range [start;end[. NOTE: start and end must refer to
914
   the same physical page. 'is_cpu_write_access' should be true if called
915
   from a real cpu write access: the virtual CPU will exit the current
916
   TB if code is modified inside this TB. */
917
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
918
                                   int is_cpu_write_access)
919
{
920
    TranslationBlock *tb, *tb_next, *saved_tb;
921
    CPUState *env = cpu_single_env;
922
    target_ulong tb_start, tb_end;
923
    PageDesc *p;
924
    int n;
925
#ifdef TARGET_HAS_PRECISE_SMC
926
    int current_tb_not_found = is_cpu_write_access;
927
    TranslationBlock *current_tb = NULL;
928
    int current_tb_modified = 0;
929
    target_ulong current_pc = 0;
930
    target_ulong current_cs_base = 0;
931
    int current_flags = 0;
932
#endif /* TARGET_HAS_PRECISE_SMC */
933

    
934
    p = page_find(start >> TARGET_PAGE_BITS);
935
    if (!p)
936
        return;
937
    if (!p->code_bitmap &&
938
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
939
        is_cpu_write_access) {
940
        /* build code bitmap */
941
        build_page_bitmap(p);
942
    }
943

    
944
    /* we remove all the TBs in the range [start, end[ */
945
    /* XXX: see if in some cases it could be faster to invalidate all the code */
946
    tb = p->first_tb;
947
    while (tb != NULL) {
948
        n = (long)tb & 3;
949
        tb = (TranslationBlock *)((long)tb & ~3);
950
        tb_next = tb->page_next[n];
951
        /* NOTE: this is subtle as a TB may span two physical pages */
952
        if (n == 0) {
953
            /* NOTE: tb_end may be after the end of the page, but
954
               it is not a problem */
955
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
956
            tb_end = tb_start + tb->size;
957
        } else {
958
            tb_start = tb->page_addr[1];
959
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
960
        }
961
        if (!(tb_end <= start || tb_start >= end)) {
962
#ifdef TARGET_HAS_PRECISE_SMC
963
            if (current_tb_not_found) {
964
                current_tb_not_found = 0;
965
                current_tb = NULL;
966
                if (env->mem_io_pc) {
967
                    /* now we have a real cpu fault */
968
                    current_tb = tb_find_pc(env->mem_io_pc);
969
                }
970
            }
971
            if (current_tb == tb &&
972
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
973
                /* If we are modifying the current TB, we must stop
974
                its execution. We could be more precise by checking
975
                that the modification is after the current PC, but it
976
                would require a specialized function to partially
977
                restore the CPU state */
978

    
979
                current_tb_modified = 1;
980
                cpu_restore_state(current_tb, env,
981
                                  env->mem_io_pc, NULL);
982
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
983
                                     &current_flags);
984
            }
985
#endif /* TARGET_HAS_PRECISE_SMC */
986
            /* we need to do that to handle the case where a signal
987
               occurs while doing tb_phys_invalidate() */
988
            saved_tb = NULL;
989
            if (env) {
990
                saved_tb = env->current_tb;
991
                env->current_tb = NULL;
992
            }
993
            tb_phys_invalidate(tb, -1);
994
            if (env) {
995
                env->current_tb = saved_tb;
996
                if (env->interrupt_request && env->current_tb)
997
                    cpu_interrupt(env, env->interrupt_request);
998
            }
999
        }
1000
        tb = tb_next;
1001
    }
1002
#if !defined(CONFIG_USER_ONLY)
1003
    /* if no code remaining, no need to continue to use slow writes */
1004
    if (!p->first_tb) {
1005
        invalidate_page_bitmap(p);
1006
        if (is_cpu_write_access) {
1007
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1008
        }
1009
    }
1010
#endif
1011
#ifdef TARGET_HAS_PRECISE_SMC
1012
    if (current_tb_modified) {
1013
        /* we generate a block containing just the instruction
1014
           modifying the memory. It will ensure that it cannot modify
1015
           itself */
1016
        env->current_tb = NULL;
1017
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1018
        cpu_resume_from_signal(env, NULL);
1019
    }
1020
#endif
1021
}
1022

    
1023
/* len must be <= 8 and start must be a multiple of len */
1024
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1025
{
1026
    PageDesc *p;
1027
    int offset, b;
1028
#if 0
1029
    if (1) {
1030
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1031
                  cpu_single_env->mem_io_vaddr, len,
1032
                  cpu_single_env->eip,
1033
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1034
    }
1035
#endif
1036
    p = page_find(start >> TARGET_PAGE_BITS);
1037
    if (!p)
1038
        return;
1039
    if (p->code_bitmap) {
1040
        offset = start & ~TARGET_PAGE_MASK;
1041
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1042
        if (b & ((1 << len) - 1))
1043
            goto do_invalidate;
1044
    } else {
1045
    do_invalidate:
1046
        tb_invalidate_phys_page_range(start, start + len, 1);
1047
    }
1048
}
1049

    
1050
#if !defined(CONFIG_SOFTMMU)
1051
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1052
                                    unsigned long pc, void *puc)
1053
{
1054
    TranslationBlock *tb;
1055
    PageDesc *p;
1056
    int n;
1057
#ifdef TARGET_HAS_PRECISE_SMC
1058
    TranslationBlock *current_tb = NULL;
1059
    CPUState *env = cpu_single_env;
1060
    int current_tb_modified = 0;
1061
    target_ulong current_pc = 0;
1062
    target_ulong current_cs_base = 0;
1063
    int current_flags = 0;
1064
#endif
1065

    
1066
    addr &= TARGET_PAGE_MASK;
1067
    p = page_find(addr >> TARGET_PAGE_BITS);
1068
    if (!p)
1069
        return;
1070
    tb = p->first_tb;
1071
#ifdef TARGET_HAS_PRECISE_SMC
1072
    if (tb && pc != 0) {
1073
        current_tb = tb_find_pc(pc);
1074
    }
1075
#endif
1076
    while (tb != NULL) {
1077
        n = (long)tb & 3;
1078
        tb = (TranslationBlock *)((long)tb & ~3);
1079
#ifdef TARGET_HAS_PRECISE_SMC
1080
        if (current_tb == tb &&
1081
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1082
                /* If we are modifying the current TB, we must stop
1083
                   its execution. We could be more precise by checking
1084
                   that the modification is after the current PC, but it
1085
                   would require a specialized function to partially
1086
                   restore the CPU state */
1087

    
1088
            current_tb_modified = 1;
1089
            cpu_restore_state(current_tb, env, pc, puc);
1090
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1091
                                 &current_flags);
1092
        }
1093
#endif /* TARGET_HAS_PRECISE_SMC */
1094
        tb_phys_invalidate(tb, addr);
1095
        tb = tb->page_next[n];
1096
    }
1097
    p->first_tb = NULL;
1098
#ifdef TARGET_HAS_PRECISE_SMC
1099
    if (current_tb_modified) {
1100
        /* we generate a block containing just the instruction
1101
           modifying the memory. It will ensure that it cannot modify
1102
           itself */
1103
        env->current_tb = NULL;
1104
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1105
        cpu_resume_from_signal(env, puc);
1106
    }
1107
#endif
1108
}
1109
#endif
1110

    
1111
/* add the tb in the target page and protect it if necessary */
1112
static inline void tb_alloc_page(TranslationBlock *tb,
1113
                                 unsigned int n, target_ulong page_addr)
1114
{
1115
    PageDesc *p;
1116
    TranslationBlock *last_first_tb;
1117

    
1118
    tb->page_addr[n] = page_addr;
1119
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1120
    tb->page_next[n] = p->first_tb;
1121
    last_first_tb = p->first_tb;
1122
    p->first_tb = (TranslationBlock *)((long)tb | n);
1123
    invalidate_page_bitmap(p);
1124

    
1125
#if defined(TARGET_HAS_SMC) || 1
1126

    
1127
#if defined(CONFIG_USER_ONLY)
1128
    if (p->flags & PAGE_WRITE) {
1129
        target_ulong addr;
1130
        PageDesc *p2;
1131
        int prot;
1132

    
1133
        /* force the host page as non writable (writes will have a
1134
           page fault + mprotect overhead) */
1135
        page_addr &= qemu_host_page_mask;
1136
        prot = 0;
1137
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1138
            addr += TARGET_PAGE_SIZE) {
1139

    
1140
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1141
            if (!p2)
1142
                continue;
1143
            prot |= p2->flags;
1144
            p2->flags &= ~PAGE_WRITE;
1145
            page_get_flags(addr);
1146
          }
1147
        mprotect(g2h(page_addr), qemu_host_page_size,
1148
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1149
#ifdef DEBUG_TB_INVALIDATE
1150
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1151
               page_addr);
1152
#endif
1153
    }
1154
#else
1155
    /* if some code is already present, then the pages are already
1156
       protected. So we handle the case where only the first TB is
1157
       allocated in a physical page */
1158
    if (!last_first_tb) {
1159
        tlb_protect_code(page_addr);
1160
    }
1161
#endif
1162

    
1163
#endif /* TARGET_HAS_SMC */
1164
}
1165

    
1166
/* Allocate a new translation block. Flush the translation buffer if
1167
   too many translation blocks or too much generated code. */
1168
TranslationBlock *tb_alloc(target_ulong pc)
1169
{
1170
    TranslationBlock *tb;
1171

    
1172
    if (nb_tbs >= code_gen_max_blocks ||
1173
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1174
        return NULL;
1175
    tb = &tbs[nb_tbs++];
1176
    tb->pc = pc;
1177
    tb->cflags = 0;
1178
    return tb;
1179
}
1180

    
1181
void tb_free(TranslationBlock *tb)
1182
{
1183
    /* In practice this is mostly used for single-use temporary TBs.
1184
       Ignore the hard cases and just back up if this TB happens to
1185
       be the last one generated.  */
1186
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1187
        code_gen_ptr = tb->tc_ptr;
1188
        nb_tbs--;
1189
    }
1190
}
1191

    
1192
/* add a new TB and link it to the physical page tables. phys_page2 is
1193
   (-1) to indicate that only one page contains the TB. */
1194
void tb_link_phys(TranslationBlock *tb,
1195
                  target_ulong phys_pc, target_ulong phys_page2)
1196
{
1197
    unsigned int h;
1198
    TranslationBlock **ptb;
1199

    
1200
    /* Grab the mmap lock to stop another thread invalidating this TB
1201
       before we are done.  */
1202
    mmap_lock();
1203
    /* add in the physical hash table */
1204
    h = tb_phys_hash_func(phys_pc);
1205
    ptb = &tb_phys_hash[h];
1206
    tb->phys_hash_next = *ptb;
1207
    *ptb = tb;
1208

    
1209
    /* add in the page list */
1210
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1211
    if (phys_page2 != -1)
1212
        tb_alloc_page(tb, 1, phys_page2);
1213
    else
1214
        tb->page_addr[1] = -1;
1215

    
1216
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1217
    tb->jmp_next[0] = NULL;
1218
    tb->jmp_next[1] = NULL;
1219

    
1220
    /* init original jump addresses */
1221
    if (tb->tb_next_offset[0] != 0xffff)
1222
        tb_reset_jump(tb, 0);
1223
    if (tb->tb_next_offset[1] != 0xffff)
1224
        tb_reset_jump(tb, 1);
1225

    
1226
#ifdef DEBUG_TB_CHECK
1227
    tb_page_check();
1228
#endif
1229
    mmap_unlock();
1230
}
1231

    
1232
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1233
   tb[1].tc_ptr. Return NULL if not found */
1234
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1235
{
1236
    int m_min, m_max, m;
1237
    unsigned long v;
1238
    TranslationBlock *tb;
1239

    
1240
    if (nb_tbs <= 0)
1241
        return NULL;
1242
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1243
        tc_ptr >= (unsigned long)code_gen_ptr)
1244
        return NULL;
1245
    /* binary search (cf Knuth) */
1246
    m_min = 0;
1247
    m_max = nb_tbs - 1;
1248
    while (m_min <= m_max) {
1249
        m = (m_min + m_max) >> 1;
1250
        tb = &tbs[m];
1251
        v = (unsigned long)tb->tc_ptr;
1252
        if (v == tc_ptr)
1253
            return tb;
1254
        else if (tc_ptr < v) {
1255
            m_max = m - 1;
1256
        } else {
1257
            m_min = m + 1;
1258
        }
1259
    }
1260
    return &tbs[m_max];
1261
}
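/* Note: when the binary search exits without an exact match, m_max indexes
   the last TB whose tc_ptr lies below the searched address, i.e. the block
   containing tc_ptr, since tbs[] entries are created in increasing tc_ptr
   order. */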
1262

    
1263
static void tb_reset_jump_recursive(TranslationBlock *tb);
1264

    
1265
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1266
{
1267
    TranslationBlock *tb1, *tb_next, **ptb;
1268
    unsigned int n1;
1269

    
1270
    tb1 = tb->jmp_next[n];
1271
    if (tb1 != NULL) {
1272
        /* find head of list */
1273
        for(;;) {
1274
            n1 = (long)tb1 & 3;
1275
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1276
            if (n1 == 2)
1277
                break;
1278
            tb1 = tb1->jmp_next[n1];
1279
        }
1280
        /* we are now sure now that tb jumps to tb1 */
1281
        tb_next = tb1;
1282

    
1283
        /* remove tb from the jmp_first list */
1284
        ptb = &tb_next->jmp_first;
1285
        for(;;) {
1286
            tb1 = *ptb;
1287
            n1 = (long)tb1 & 3;
1288
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1289
            if (n1 == n && tb1 == tb)
1290
                break;
1291
            ptb = &tb1->jmp_next[n1];
1292
        }
1293
        *ptb = tb->jmp_next[n];
1294
        tb->jmp_next[n] = NULL;
1295

    
1296
        /* suppress the jump to next tb in generated code */
1297
        tb_reset_jump(tb, n);
1298

    
1299
        /* suppress jumps in the tb on which we could have jumped */
1300
        tb_reset_jump_recursive(tb_next);
1301
    }
1302
}
1303

    
1304
static void tb_reset_jump_recursive(TranslationBlock *tb)
1305
{
1306
    tb_reset_jump_recursive2(tb, 0);
1307
    tb_reset_jump_recursive2(tb, 1);
1308
}
1309

    
1310
#if defined(TARGET_HAS_ICE)
1311
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1312
{
1313
    target_phys_addr_t addr;
1314
    target_ulong pd;
1315
    ram_addr_t ram_addr;
1316
    PhysPageDesc *p;
1317

    
1318
    addr = cpu_get_phys_page_debug(env, pc);
1319
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1320
    if (!p) {
1321
        pd = IO_MEM_UNASSIGNED;
1322
    } else {
1323
        pd = p->phys_offset;
1324
    }
1325
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1326
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1327
}
1328
#endif
1329

    
1330
/* Add a watchpoint.  */
1331
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1332
                          int flags, CPUWatchpoint **watchpoint)
1333
{
1334
    target_ulong len_mask = ~(len - 1);
1335
    CPUWatchpoint *wp;
1336

    
1337
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1338
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1339
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1340
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1341
        return -EINVAL;
1342
    }
1343
    wp = qemu_malloc(sizeof(*wp));
1344

    
1345
    wp->vaddr = addr;
1346
    wp->len_mask = len_mask;
1347
    wp->flags = flags;
1348

    
1349
    /* keep all GDB-injected watchpoints in front */
1350
    if (flags & BP_GDB)
1351
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1352
    else
1353
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1354

    
1355
    tlb_flush_page(env, addr);
1356

    
1357
    if (watchpoint)
1358
        *watchpoint = wp;
1359
    return 0;
1360
}
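/* Example: for len = 4, len_mask is ~3 and (addr & ~len_mask) is non-zero
   exactly when addr is not 4-byte aligned, in which case the watchpoint is
   rejected; the stored len_mask is also what cpu_watchpoint_remove() matches
   against. */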
1361

    
1362
/* Remove a specific watchpoint.  */
1363
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1364
                          int flags)
1365
{
1366
    target_ulong len_mask = ~(len - 1);
1367
    CPUWatchpoint *wp;
1368

    
1369
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1370
        if (addr == wp->vaddr && len_mask == wp->len_mask
1371
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1372
            cpu_watchpoint_remove_by_ref(env, wp);
1373
            return 0;
1374
        }
1375
    }
1376
    return -ENOENT;
1377
}
1378

    
1379
/* Remove a specific watchpoint by reference.  */
1380
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1381
{
1382
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1383

    
1384
    tlb_flush_page(env, watchpoint->vaddr);
1385

    
1386
    qemu_free(watchpoint);
1387
}
1388

    
1389
/* Remove all matching watchpoints.  */
1390
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1391
{
1392
    CPUWatchpoint *wp, *next;
1393

    
1394
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1395
        if (wp->flags & mask)
1396
            cpu_watchpoint_remove_by_ref(env, wp);
1397
    }
1398
}
1399

    
1400
/* Add a breakpoint.  */
1401
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1402
                          CPUBreakpoint **breakpoint)
1403
{
1404
#if defined(TARGET_HAS_ICE)
1405
    CPUBreakpoint *bp;
1406

    
1407
    bp = qemu_malloc(sizeof(*bp));
1408

    
1409
    bp->pc = pc;
1410
    bp->flags = flags;
1411

    
1412
    /* keep all GDB-injected breakpoints in front */
1413
    if (flags & BP_GDB)
1414
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1415
    else
1416
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1417

    
1418
    breakpoint_invalidate(env, pc);
1419

    
1420
    if (breakpoint)
1421
        *breakpoint = bp;
1422
    return 0;
1423
#else
1424
    return -ENOSYS;
1425
#endif
1426
}
1427

    
1428
/* Remove a specific breakpoint.  */
1429
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1430
{
1431
#if defined(TARGET_HAS_ICE)
1432
    CPUBreakpoint *bp;
1433

    
1434
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1435
        if (bp->pc == pc && bp->flags == flags) {
1436
            cpu_breakpoint_remove_by_ref(env, bp);
1437
            return 0;
1438
        }
1439
    }
1440
    return -ENOENT;
1441
#else
1442
    return -ENOSYS;
1443
#endif
1444
}
1445

    
1446
/* Remove a specific breakpoint by reference.  */
1447
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1448
{
1449
#if defined(TARGET_HAS_ICE)
1450
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1451

    
1452
    breakpoint_invalidate(env, breakpoint->pc);
1453

    
1454
    qemu_free(breakpoint);
1455
#endif
1456
}
1457

    
1458
/* Remove all matching breakpoints. */
1459
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1460
{
1461
#if defined(TARGET_HAS_ICE)
1462
    CPUBreakpoint *bp, *next;
1463

    
1464
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1465
        if (bp->flags & mask)
1466
            cpu_breakpoint_remove_by_ref(env, bp);
1467
    }
1468
#endif
1469
}
1470

    
1471
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1472
   CPU loop after each instruction */
1473
void cpu_single_step(CPUState *env, int enabled)
1474
{
1475
#if defined(TARGET_HAS_ICE)
1476
    if (env->singlestep_enabled != enabled) {
1477
        env->singlestep_enabled = enabled;
1478
        if (kvm_enabled())
1479
            kvm_update_guest_debug(env, 0);
1480
        else {
1481
            /* must flush all the translated code to avoid inconsistencies */
1482
            /* XXX: only flush what is necessary */
1483
            tb_flush(env);
1484
        }
1485
    }
1486
#endif
1487
}
1488

    
1489
/* enable or disable low levels log */
1490
void cpu_set_log(int log_flags)
1491
{
1492
    loglevel = log_flags;
1493
    if (loglevel && !logfile) {
1494
        logfile = fopen(logfilename, log_append ? "a" : "w");
1495
        if (!logfile) {
1496
            perror(logfilename);
1497
            _exit(1);
1498
        }
1499
#if !defined(CONFIG_SOFTMMU)
1500
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1501
        {
1502
            static char logfile_buf[4096];
1503
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1504
        }
1505
#elif !defined(_WIN32)
1506
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1507
        setvbuf(logfile, NULL, _IOLBF, 0);
1508
#endif
1509
        log_append = 1;
1510
    }
1511
    if (!loglevel && logfile) {
1512
        fclose(logfile);
1513
        logfile = NULL;
1514
    }
1515
}
1516

    
1517
void cpu_set_log_filename(const char *filename)
1518
{
1519
    logfilename = strdup(filename);
1520
    if (logfile) {
1521
        fclose(logfile);
1522
        logfile = NULL;
1523
    }
1524
    cpu_set_log(loglevel);
1525
}
1526

    
1527
static void cpu_unlink_tb(CPUState *env)
1528
{
1529
#if defined(CONFIG_USE_NPTL)
1530
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1531
       problem and hope the cpu will stop of its own accord.  For userspace
1532
       emulation this often isn't actually as bad as it sounds.  Often
1533
       signals are used primarily to interrupt blocking syscalls.  */
1534
#else
1535
    TranslationBlock *tb;
1536
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1537

    
1538
    tb = env->current_tb;
1539
    /* if the cpu is currently executing code, we must unlink it and
1540
       all the potentially executing TB */
1541
    if (tb && !testandset(&interrupt_lock)) {
1542
        env->current_tb = NULL;
1543
        tb_reset_jump_recursive(tb);
1544
        resetlock(&interrupt_lock);
1545
    }
1546
#endif
1547
}
1548

    
1549
/* mask must never be zero, except for A20 change call */
1550
void cpu_interrupt(CPUState *env, int mask)
1551
{
1552
    int old_mask;
1553

    
1554
    old_mask = env->interrupt_request;
1555
    env->interrupt_request |= mask;
1556

    
1557
#ifndef CONFIG_USER_ONLY
1558
    /*
1559
     * If called from iothread context, wake the target cpu in
1560
     * case it's halted.
1561
     */
1562
    if (!qemu_cpu_self(env)) {
1563
        qemu_cpu_kick(env);
1564
        return;
1565
    }
1566
#endif
1567

    
1568
    if (use_icount) {
1569
        env->icount_decr.u16.high = 0xffff;
1570
#ifndef CONFIG_USER_ONLY
1571
        if (!can_do_io(env)
1572
            && (mask & ~old_mask) != 0) {
1573
            cpu_abort(env, "Raised interrupt while not in I/O function");
1574
        }
1575
#endif
1576
    } else {
1577
        cpu_unlink_tb(env);
1578
    }
1579
}
1580

    
1581
void cpu_reset_interrupt(CPUState *env, int mask)
1582
{
1583
    env->interrupt_request &= ~mask;
1584
}
1585

    
1586
void cpu_exit(CPUState *env)
1587
{
1588
    env->exit_request = 1;
1589
    cpu_unlink_tb(env);
1590
}
1591

    
1592
const CPULogItem cpu_log_items[] = {
1593
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1594
      "show generated host assembly code for each compiled TB" },
1595
    { CPU_LOG_TB_IN_ASM, "in_asm",
1596
      "show target assembly code for each compiled TB" },
1597
    { CPU_LOG_TB_OP, "op",
1598
      "show micro ops for each compiled TB" },
1599
    { CPU_LOG_TB_OP_OPT, "op_opt",
1600
      "show micro ops "
1601
#ifdef TARGET_I386
1602
      "before eflags optimization and "
1603
#endif
1604
      "after liveness analysis" },
1605
    { CPU_LOG_INT, "int",
1606
      "show interrupts/exceptions in short format" },
1607
    { CPU_LOG_EXEC, "exec",
1608
      "show trace before each executed TB (lots of logs)" },
1609
    { CPU_LOG_TB_CPU, "cpu",
1610
      "show CPU state before block translation" },
1611
#ifdef TARGET_I386
1612
    { CPU_LOG_PCALL, "pcall",
1613
      "show protected mode far calls/returns/exceptions" },
1614
    { CPU_LOG_RESET, "cpu_reset",
1615
      "show CPU state before CPU resets" },
1616
#endif
1617
#ifdef DEBUG_IOPORT
1618
    { CPU_LOG_IOPORT, "ioport",
1619
      "show all i/o ports accesses" },
1620
#endif
1621
    { 0, NULL, NULL },
1622
};
1623

    
1624
static int cmp1(const char *s1, int n, const char *s2)
1625
{
1626
    if (strlen(s2) != n)
1627
        return 0;
1628
    return memcmp(s1, s2, n) == 0;
1629
}
1630

    
1631
/* takes a comma separated list of log masks. Return 0 if error. */
1632
int cpu_str_to_log_mask(const char *str)
1633
{
1634
    const CPULogItem *item;
1635
    int mask;
1636
    const char *p, *p1;
1637

    
1638
    p = str;
1639
    mask = 0;
1640
    for(;;) {
1641
        p1 = strchr(p, ',');
1642
        if (!p1)
1643
            p1 = p + strlen(p);
1644
        if(cmp1(p,p1-p,"all")) {
1645
                for(item = cpu_log_items; item->mask != 0; item++) {
1646
                        mask |= item->mask;
1647
                }
1648
        } else {
1649
        for(item = cpu_log_items; item->mask != 0; item++) {
1650
            if (cmp1(p, p1 - p, item->name))
1651
                goto found;
1652
        }
1653
        return 0;
1654
        }
1655
    found:
1656
        mask |= item->mask;
1657
        if (*p1 != ',')
1658
            break;
1659
        p = p1 + 1;
1660
    }
1661
    return mask;
1662
}
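/* Example: cpu_str_to_log_mask("in_asm,cpu") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_CPU, the special name "all" ORs in every
   entry of cpu_log_items[], and an unrecognized name makes the function
   return 0. */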
1663

    
1664
void cpu_abort(CPUState *env, const char *fmt, ...)
1665
{
1666
    va_list ap;
1667
    va_list ap2;
1668

    
1669
    va_start(ap, fmt);
1670
    va_copy(ap2, ap);
1671
    fprintf(stderr, "qemu: fatal: ");
1672
    vfprintf(stderr, fmt, ap);
1673
    fprintf(stderr, "\n");
1674
#ifdef TARGET_I386
1675
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1676
#else
1677
    cpu_dump_state(env, stderr, fprintf, 0);
1678
#endif
1679
    if (qemu_log_enabled()) {
1680
        qemu_log("qemu: fatal: ");
1681
        qemu_log_vprintf(fmt, ap2);
1682
        qemu_log("\n");
1683
#ifdef TARGET_I386
1684
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1685
#else
1686
        log_cpu_state(env, 0);
1687
#endif
1688
        qemu_log_flush();
1689
        qemu_log_close();
1690
    }
1691
    va_end(ap2);
1692
    va_end(ap);
1693
    abort();
1694
}
1695

    
1696
CPUState *cpu_copy(CPUState *env)
1697
{
1698
    CPUState *new_env = cpu_init(env->cpu_model_str);
1699
    CPUState *next_cpu = new_env->next_cpu;
1700
    int cpu_index = new_env->cpu_index;
1701
#if defined(TARGET_HAS_ICE)
1702
    CPUBreakpoint *bp;
1703
    CPUWatchpoint *wp;
1704
#endif
1705

    
1706
    memcpy(new_env, env, sizeof(CPUState));
1707

    
1708
    /* Preserve chaining and index. */
1709
    new_env->next_cpu = next_cpu;
1710
    new_env->cpu_index = cpu_index;
1711

    
1712
    /* Clone all break/watchpoints.
1713
       Note: Once we support ptrace with hw-debug register access, make sure
1714
       BP_CPU break/watchpoints are handled correctly on clone. */
1715
    /* Reset the new CPU's lists, then clone from the source CPU's lists. */
    QTAILQ_INIT(&new_env->breakpoints);
1716
    QTAILQ_INIT(&new_env->watchpoints);
1717
#if defined(TARGET_HAS_ICE)
1718
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1719
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1720
    }
1721
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1722
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1723
                              wp->flags, NULL);
1724
    }
1725
#endif
1726

    
1727
    return new_env;
1728
}
1729

    
1730
#if !defined(CONFIG_USER_ONLY)
1731

    
1732
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1733
{
1734
    unsigned int i;
1735

    
1736
    /* Discard jump cache entries for any tb which might potentially
1737
       overlap the flushed page.  */
1738
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1739
    memset (&env->tb_jmp_cache[i], 0, 
1740
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1741

    
1742
    i = tb_jmp_cache_hash_page(addr);
1743
    memset (&env->tb_jmp_cache[i], 0, 
1744
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1745
}
1746

    
1747
static CPUTLBEntry s_cputlb_empty_entry = {
1748
    .addr_read  = -1,
1749
    .addr_write = -1,
1750
    .addr_code  = -1,
1751
    .addend     = -1,
1752
};
1753

    
1754
/* NOTE: if flush_global is true, also flush global entries (not
1755
   implemented yet) */
1756
void tlb_flush(CPUState *env, int flush_global)
1757
{
1758
    int i;
1759

    
1760
#if defined(DEBUG_TLB)
1761
    printf("tlb_flush:\n");
1762
#endif
1763
    /* must reset current TB so that interrupts cannot modify the
1764
       links while we are modifying them */
1765
    env->current_tb = NULL;
1766

    
1767
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1768
        int mmu_idx;
1769
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1770
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1771
        }
1772
    }
1773

    
1774
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1775

    
1776
    tlb_flush_count++;
1777
}
1778

    
1779
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1780
{
1781
    if (addr == (tlb_entry->addr_read &
1782
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1783
        addr == (tlb_entry->addr_write &
1784
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1785
        addr == (tlb_entry->addr_code &
1786
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1787
        *tlb_entry = s_cputlb_empty_entry;
1788
    }
1789
}
1790

    
1791
void tlb_flush_page(CPUState *env, target_ulong addr)
1792
{
1793
    int i;
1794
    int mmu_idx;
1795

    
1796
#if defined(DEBUG_TLB)
1797
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1798
#endif
1799
    /* must reset current TB so that interrupts cannot modify the
1800
       links while we are modifying them */
1801
    env->current_tb = NULL;
1802

    
1803
    addr &= TARGET_PAGE_MASK;
1804
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1805
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1806
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1807

    
1808
    tlb_flush_jmp_cache(env, addr);
1809
}
1810

    
1811
/* update the TLBs so that writes to code in the virtual page 'addr'
1812
   can be detected */
1813
static void tlb_protect_code(ram_addr_t ram_addr)
1814
{
1815
    cpu_physical_memory_reset_dirty(ram_addr,
1816
                                    ram_addr + TARGET_PAGE_SIZE,
1817
                                    CODE_DIRTY_FLAG);
1818
}
1819

    
1820
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1821
   tested for self modifying code */
1822
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1823
                                    target_ulong vaddr)
1824
{
1825
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1826
}
1827

    
1828
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1829
                                         unsigned long start, unsigned long length)
1830
{
1831
    unsigned long addr;
1832
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1833
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1834
        if ((addr - start) < length) {
1835
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1836
        }
1837
    }
1838
}
1839

    
1840
/* Note: start and end must be within the same ram block.  */
1841
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1842
                                     int dirty_flags)
1843
{
1844
    CPUState *env;
1845
    unsigned long length, start1;
1846
    int i, mask, len;
1847
    uint8_t *p;
1848

    
1849
    start &= TARGET_PAGE_MASK;
1850
    end = TARGET_PAGE_ALIGN(end);
1851

    
1852
    length = end - start;
1853
    if (length == 0)
1854
        return;
1855
    len = length >> TARGET_PAGE_BITS;
1856
    mask = ~dirty_flags;
1857
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1858
    for(i = 0; i < len; i++)
1859
        p[i] &= mask;
1860

    
1861
    /* we modify the TLB cache so that the dirty bit will be set again
1862
       when accessing the range */
1863
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1864
    /* Check that we don't span multiple blocks - this breaks the
1865
       address comparisons below.  */
1866
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1867
            != (end - 1) - start) {
1868
        abort();
1869
    }
1870

    
1871
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1872
        int mmu_idx;
1873
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1874
            for(i = 0; i < CPU_TLB_SIZE; i++)
1875
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1876
                                      start1, length);
1877
        }
1878
    }
1879
}
1880

    
1881
int cpu_physical_memory_set_dirty_tracking(int enable)
1882
{
1883
    in_migration = enable;
1884
    if (kvm_enabled()) {
1885
        return kvm_set_migration_log(enable);
1886
    }
1887
    return 0;
1888
}
1889

    
1890
int cpu_physical_memory_get_dirty_tracking(void)
1891
{
1892
    return in_migration;
1893
}
1894

    
1895
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1896
                                   target_phys_addr_t end_addr)
1897
{
1898
    int ret = 0;
1899

    
1900
    if (kvm_enabled())
1901
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1902
    return ret;
1903
}
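/* Illustrative sketch, under #if 0 as it is only a sketch: the dirty-tracking
 * cycle a RAM migration pass might run with the helpers above -- enable
 * tracking, sync the bitmap, send dirty pages, then clear their dirty bits.
 * MIGRATION_DIRTY_FLAG and cpu_physical_memory_get_dirty() are assumed from
 * cpu-all.h; send_page() is hypothetical. */
#if 0
static void example_migrate_pass(ram_addr_t ram_size)
{
    ram_addr_t addr;

    cpu_physical_memory_set_dirty_tracking(1);
    cpu_physical_sync_dirty_bitmap(0, ram_size);
    for (addr = 0; addr < ram_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) {
            send_page(addr, qemu_get_ram_ptr(addr));          /* hypothetical */
            cpu_physical_memory_reset_dirty(addr, addr + TARGET_PAGE_SIZE,
                                            MIGRATION_DIRTY_FLAG);
        }
    }
}
#endif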
1904

    
1905
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1906
{
1907
    ram_addr_t ram_addr;
1908
    void *p;
1909

    
1910
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1911
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1912
            + tlb_entry->addend);
1913
        ram_addr = qemu_ram_addr_from_host(p);
1914
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1915
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1916
        }
1917
    }
1918
}
1919

    
1920
/* update the TLB according to the current state of the dirty bits */
1921
void cpu_tlb_update_dirty(CPUState *env)
1922
{
1923
    int i;
1924
    int mmu_idx;
1925
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1926
        for(i = 0; i < CPU_TLB_SIZE; i++)
1927
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1928
    }
1929
}
1930

    
1931
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1932
{
1933
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1934
        tlb_entry->addr_write = vaddr;
1935
}
1936

    
1937
/* update the TLB corresponding to virtual page vaddr
1938
   so that it is no longer dirty */
1939
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1940
{
1941
    int i;
1942
    int mmu_idx;
1943

    
1944
    vaddr &= TARGET_PAGE_MASK;
1945
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1946
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1947
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1948
}
1949

    
1950
/* add a new TLB entry. At most one entry for a given virtual address
1951
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1952
   (can only happen in non SOFTMMU mode for I/O pages or pages
1953
   conflicting with the host address space). */
1954
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1955
                      target_phys_addr_t paddr, int prot,
1956
                      int mmu_idx, int is_softmmu)
1957
{
1958
    PhysPageDesc *p;
1959
    unsigned long pd;
1960
    unsigned int index;
1961
    target_ulong address;
1962
    target_ulong code_address;
1963
    target_phys_addr_t addend;
1964
    int ret;
1965
    CPUTLBEntry *te;
1966
    CPUWatchpoint *wp;
1967
    target_phys_addr_t iotlb;
1968

    
1969
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1970
    if (!p) {
1971
        pd = IO_MEM_UNASSIGNED;
1972
    } else {
1973
        pd = p->phys_offset;
1974
    }
1975
#if defined(DEBUG_TLB)
1976
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1977
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1978
#endif
1979

    
1980
    ret = 0;
1981
    address = vaddr;
1982
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1983
        /* IO memory case (romd handled later) */
1984
        address |= TLB_MMIO;
1985
    }
1986
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1987
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1988
        /* Normal RAM.  */
1989
        iotlb = pd & TARGET_PAGE_MASK;
1990
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1991
            iotlb |= IO_MEM_NOTDIRTY;
1992
        else
1993
            iotlb |= IO_MEM_ROM;
1994
    } else {
1995
        /* IO handlers are currently passed a physical address.
1996
           It would be nice to pass an offset from the base address
1997
           of that region.  This would avoid having to special case RAM,
1998
           and avoid full address decoding in every device.
1999
           We can't use the high bits of pd for this because
2000
           IO_MEM_ROMD uses these as a ram address.  */
2001
        iotlb = (pd & ~TARGET_PAGE_MASK);
2002
        if (p) {
2003
            iotlb += p->region_offset;
2004
        } else {
2005
            iotlb += paddr;
2006
        }
2007
    }
2008

    
2009
    code_address = address;
2010
    /* Make accesses to pages with watchpoints go via the
2011
       watchpoint trap routines.  */
2012
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2013
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2014
            iotlb = io_mem_watch + paddr;
2015
            /* TODO: The memory case can be optimized by not trapping
2016
               reads of pages with a write breakpoint.  */
2017
            address |= TLB_MMIO;
2018
        }
2019
    }
2020

    
2021
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2022
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2023
    te = &env->tlb_table[mmu_idx][index];
2024
    te->addend = addend - vaddr;
2025
    if (prot & PAGE_READ) {
2026
        te->addr_read = address;
2027
    } else {
2028
        te->addr_read = -1;
2029
    }
2030

    
2031
    if (prot & PAGE_EXEC) {
2032
        te->addr_code = code_address;
2033
    } else {
2034
        te->addr_code = -1;
2035
    }
2036
    if (prot & PAGE_WRITE) {
2037
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2038
            (pd & IO_MEM_ROMD)) {
2039
            /* Write access calls the I/O callback.  */
2040
            te->addr_write = address | TLB_MMIO;
2041
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2042
                   !cpu_physical_memory_is_dirty(pd)) {
2043
            te->addr_write = address | TLB_NOTDIRTY;
2044
        } else {
2045
            te->addr_write = address;
2046
        }
2047
    } else {
2048
        te->addr_write = -1;
2049
    }
2050
    return ret;
2051
}
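/* Illustrative sketch, under #if 0 as it is only a sketch: the shape of a
 * target MMU fault handler that resolves a guest virtual address and installs
 * the mapping with tlb_set_page_exec().  example_walk_page_table() and its
 * outputs are hypothetical; real targets do this work in their
 * cpu_*_handle_mmu_fault() routines. */
#if 0
static int example_handle_mmu_fault(CPUState *env, target_ulong addr,
                                    int mmu_idx, int is_softmmu)
{
    target_phys_addr_t paddr;
    int prot;

    if (example_walk_page_table(env, addr, &paddr, &prot) < 0) {
        return 1;   /* no mapping: the target would raise a guest fault here */
    }
    return tlb_set_page_exec(env, addr & TARGET_PAGE_MASK,
                             paddr & TARGET_PAGE_MASK, prot,
                             mmu_idx, is_softmmu);
}
#endif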
2052

    
2053
#else
2054

    
2055
void tlb_flush(CPUState *env, int flush_global)
2056
{
2057
}
2058

    
2059
void tlb_flush_page(CPUState *env, target_ulong addr)
2060
{
2061
}
2062

    
2063
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2064
                      target_phys_addr_t paddr, int prot,
2065
                      int mmu_idx, int is_softmmu)
2066
{
2067
    return 0;
2068
}
2069

    
2070
/*
2071
 * Walks guest process memory "regions" one by one
2072
 * and calls callback function 'fn' for each region.
2073
 */
2074
int walk_memory_regions(void *priv,
2075
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2076
{
2077
    unsigned long start, end;
2078
    PageDesc *p = NULL;
2079
    int i, j, prot, prot1;
2080
    int rc = 0;
2081

    
2082
    start = end = -1;
2083
    prot = 0;
2084

    
2085
    for (i = 0; i <= L1_SIZE; i++) {
2086
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2087
        for (j = 0; j < L2_SIZE; j++) {
2088
            prot1 = (p == NULL) ? 0 : p[j].flags;
2089
            /*
2090
             * "region" is one continuous chunk of memory
2091
             * that has the same protection flags set.
2092
             */
2093
            if (prot1 != prot) {
2094
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2095
                if (start != -1) {
2096
                    rc = (*fn)(priv, start, end, prot);
2097
                    /* callback can stop iteration by returning != 0 */
2098
                    if (rc != 0)
2099
                        return (rc);
2100
                }
2101
                if (prot1 != 0)
2102
                    start = end;
2103
                else
2104
                    start = -1;
2105
                prot = prot1;
2106
            }
2107
            if (p == NULL)
2108
                break;
2109
        }
2110
    }
2111
    return (rc);
2112
}
2113

    
2114
static int dump_region(void *priv, unsigned long start,
2115
    unsigned long end, unsigned long prot)
2116
{
2117
    FILE *f = (FILE *)priv;
2118

    
2119
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2120
        start, end, end - start,
2121
        ((prot & PAGE_READ) ? 'r' : '-'),
2122
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2123
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2124

    
2125
    return (0);
2126
}
2127

    
2128
/* dump memory mappings */
2129
void page_dump(FILE *f)
2130
{
2131
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2132
            "start", "end", "size", "prot");
2133
    walk_memory_regions(f, dump_region);
2134
}
2135

    
2136
int page_get_flags(target_ulong address)
2137
{
2138
    PageDesc *p;
2139

    
2140
    p = page_find(address >> TARGET_PAGE_BITS);
2141
    if (!p)
2142
        return 0;
2143
    return p->flags;
2144
}
2145

    
2146
/* modify the flags of a page and invalidate the code if
2147
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2148
   depending on PAGE_WRITE */
2149
void page_set_flags(target_ulong start, target_ulong end, int flags)
2150
{
2151
    PageDesc *p;
2152
    target_ulong addr;
2153

    
2154
    /* mmap_lock should already be held.  */
2155
    start = start & TARGET_PAGE_MASK;
2156
    end = TARGET_PAGE_ALIGN(end);
2157
    if (flags & PAGE_WRITE)
2158
        flags |= PAGE_WRITE_ORG;
2159
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2160
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2161
        /* We may be called for host regions that are outside guest
2162
           address space.  */
2163
        if (!p)
2164
            return;
2165
        /* if the write protection is set, then we invalidate the code
2166
           inside */
2167
        if (!(p->flags & PAGE_WRITE) &&
2168
            (flags & PAGE_WRITE) &&
2169
            p->first_tb) {
2170
            tb_invalidate_phys_page(addr, 0, NULL);
2171
        }
2172
        p->flags = flags;
2173
    }
2174
}
2175

    
2176
int page_check_range(target_ulong start, target_ulong len, int flags)
2177
{
2178
    PageDesc *p;
2179
    target_ulong end;
2180
    target_ulong addr;
2181

    
2182
    if (start + len < start)
2183
        /* we've wrapped around */
2184
        return -1;
2185

    
2186
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2187
    start = start & TARGET_PAGE_MASK;
2188

    
2189
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2190
        p = page_find(addr >> TARGET_PAGE_BITS);
2191
        if( !p )
2192
            return -1;
2193
        if( !(p->flags & PAGE_VALID) )
2194
            return -1;
2195

    
2196
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2197
            return -1;
2198
        if (flags & PAGE_WRITE) {
2199
            if (!(p->flags & PAGE_WRITE_ORG))
2200
                return -1;
2201
            /* unprotect the page if it was put read-only because it
2202
               contains translated code */
2203
            if (!(p->flags & PAGE_WRITE)) {
2204
                if (!page_unprotect(addr, 0, NULL))
2205
                    return -1;
2206
            }
2207
            return 0;
2208
        }
2209
    }
2210
    return 0;
2211
}
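/* Illustrative sketch, under #if 0 as it is only a sketch: how user-mode
 * emulation code can validate a guest buffer before copying from it.
 * example_copy_from_guest() is hypothetical; g2h() and PAGE_READ are the
 * helpers already used in this file. */
#if 0
static int example_copy_from_guest(void *dst, target_ulong guest_addr,
                                   target_ulong len)
{
    if (page_check_range(guest_addr, len, PAGE_READ) < 0) {
        return -1;          /* a real syscall path would return -EFAULT */
    }
    memcpy(dst, g2h(guest_addr), len);
    return 0;
}
#endif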
2212

    
2213
/* called from signal handler: invalidate the code and unprotect the
2214
   page. Return TRUE if the fault was successfully handled. */
2215
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2216
{
2217
    unsigned int page_index, prot, pindex;
2218
    PageDesc *p, *p1;
2219
    target_ulong host_start, host_end, addr;
2220

    
2221
    /* Technically this isn't safe inside a signal handler.  However we
2222
       know this only ever happens in a synchronous SEGV handler, so in
2223
       practice it seems to be ok.  */
2224
    mmap_lock();
2225

    
2226
    host_start = address & qemu_host_page_mask;
2227
    page_index = host_start >> TARGET_PAGE_BITS;
2228
    p1 = page_find(page_index);
2229
    if (!p1) {
2230
        mmap_unlock();
2231
        return 0;
2232
    }
2233
    host_end = host_start + qemu_host_page_size;
2234
    p = p1;
2235
    prot = 0;
2236
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2237
        prot |= p->flags;
2238
        p++;
2239
    }
2240
    /* if the page was really writable, then we change its
2241
       protection back to writable */
2242
    if (prot & PAGE_WRITE_ORG) {
2243
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2244
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2245
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2246
                     (prot & PAGE_BITS) | PAGE_WRITE);
2247
            p1[pindex].flags |= PAGE_WRITE;
2248
            /* and since the content will be modified, we must invalidate
2249
               the corresponding translated code. */
2250
            tb_invalidate_phys_page(address, pc, puc);
2251
#ifdef DEBUG_TB_CHECK
2252
            tb_invalidate_check(address);
2253
#endif
2254
            mmap_unlock();
2255
            return 1;
2256
        }
2257
    }
2258
    mmap_unlock();
2259
    return 0;
2260
}
2261

    
2262
static inline void tlb_set_dirty(CPUState *env,
2263
                                 unsigned long addr, target_ulong vaddr)
2264
{
2265
}
2266
#endif /* defined(CONFIG_USER_ONLY) */
2267

    
2268
#if !defined(CONFIG_USER_ONLY)
2269

    
2270
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2271
                             ram_addr_t memory, ram_addr_t region_offset);
2272
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2273
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2274
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2275
                      need_subpage)                                     \
2276
    do {                                                                \
2277
        if (addr > start_addr)                                          \
2278
            start_addr2 = 0;                                            \
2279
        else {                                                          \
2280
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2281
            if (start_addr2 > 0)                                        \
2282
                need_subpage = 1;                                       \
2283
        }                                                               \
2284
                                                                        \
2285
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2286
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2287
        else {                                                          \
2288
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2289
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2290
                need_subpage = 1;                                       \
2291
        }                                                               \
2292
    } while (0)
2293

    
2294
/* register physical memory.
2295
   For RAM, 'size' must be a multiple of the target page size.
2296
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2297
   io memory page.  The address used when calling the IO function is
2298
   the offset from the start of the region, plus region_offset.  Both
2299
   start_addr and region_offset are rounded down to a page boundary
2300
   before calculating this offset.  This should not be a problem unless
2301
   the low bits of start_addr and region_offset differ.  */
2302
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2303
                                         ram_addr_t size,
2304
                                         ram_addr_t phys_offset,
2305
                                         ram_addr_t region_offset)
2306
{
2307
    target_phys_addr_t addr, end_addr;
2308
    PhysPageDesc *p;
2309
    CPUState *env;
2310
    ram_addr_t orig_size = size;
2311
    void *subpage;
2312

    
2313
    if (kvm_enabled())
2314
        kvm_set_phys_mem(start_addr, size, phys_offset);
2315

    
2316
    if (phys_offset == IO_MEM_UNASSIGNED) {
2317
        region_offset = start_addr;
2318
    }
2319
    region_offset &= TARGET_PAGE_MASK;
2320
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2321
    end_addr = start_addr + (target_phys_addr_t)size;
2322
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2323
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2324
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2325
            ram_addr_t orig_memory = p->phys_offset;
2326
            target_phys_addr_t start_addr2, end_addr2;
2327
            int need_subpage = 0;
2328

    
2329
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2330
                          need_subpage);
2331
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2332
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2333
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2334
                                           &p->phys_offset, orig_memory,
2335
                                           p->region_offset);
2336
                } else {
2337
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2338
                                            >> IO_MEM_SHIFT];
2339
                }
2340
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2341
                                 region_offset);
2342
                p->region_offset = 0;
2343
            } else {
2344
                p->phys_offset = phys_offset;
2345
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2346
                    (phys_offset & IO_MEM_ROMD))
2347
                    phys_offset += TARGET_PAGE_SIZE;
2348
            }
2349
        } else {
2350
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2351
            p->phys_offset = phys_offset;
2352
            p->region_offset = region_offset;
2353
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2354
                (phys_offset & IO_MEM_ROMD)) {
2355
                phys_offset += TARGET_PAGE_SIZE;
2356
            } else {
2357
                target_phys_addr_t start_addr2, end_addr2;
2358
                int need_subpage = 0;
2359

    
2360
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2361
                              end_addr2, need_subpage);
2362

    
2363
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2364
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2365
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2366
                                           addr & TARGET_PAGE_MASK);
2367
                    subpage_register(subpage, start_addr2, end_addr2,
2368
                                     phys_offset, region_offset);
2369
                    p->region_offset = 0;
2370
                }
2371
            }
2372
        }
2373
        region_offset += TARGET_PAGE_SIZE;
2374
    }
2375

    
2376
    /* since each CPU stores ram addresses in its TLB cache, we must
2377
       reset the modified entries */
2378
    /* XXX: slow ! */
2379
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2380
        tlb_flush(env, 1);
2381
    }
2382
}
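/* Illustrative sketch, under #if 0 as it is only a sketch: how board code
 * typically pairs qemu_ram_alloc() with this registration call.  It assumes
 * the cpu_register_physical_memory() wrapper mentioned below forwards here
 * with region_offset == 0; the sizes and addresses are made up. */
#if 0
static void example_board_init_memory(void)
{
    ram_addr_t ram_offset, rom_offset;

    /* 128 MB of RAM at guest physical address 0. */
    ram_offset = qemu_ram_alloc(128 * 1024 * 1024);
    cpu_register_physical_memory(0x00000000, 128 * 1024 * 1024,
                                 ram_offset | IO_MEM_RAM);

    /* A 64 KB boot ROM mapped near the top of the 32-bit address space. */
    rom_offset = qemu_ram_alloc(64 * 1024);
    cpu_register_physical_memory(0xffff0000, 64 * 1024,
                                 rom_offset | IO_MEM_ROM);
}
#endif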
2383

    
2384
/* XXX: temporary until new memory mapping API */
2385
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2386
{
2387
    PhysPageDesc *p;
2388

    
2389
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2390
    if (!p)
2391
        return IO_MEM_UNASSIGNED;
2392
    return p->phys_offset;
2393
}
2394

    
2395
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2396
{
2397
    if (kvm_enabled())
2398
        kvm_coalesce_mmio_region(addr, size);
2399
}
2400

    
2401
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2402
{
2403
    if (kvm_enabled())
2404
        kvm_uncoalesce_mmio_region(addr, size);
2405
}
2406

    
2407
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2408
{
2409
    RAMBlock *new_block;
2410

    
2411
    size = TARGET_PAGE_ALIGN(size);
2412
    new_block = qemu_malloc(sizeof(*new_block));
2413

    
2414
    new_block->host = qemu_vmalloc(size);
2415
#ifdef MADV_MERGEABLE
2416
    madvise(new_block->host, size, MADV_MERGEABLE);
2417
#endif
2418
    new_block->offset = last_ram_offset;
2419
    new_block->length = size;
2420

    
2421
    new_block->next = ram_blocks;
2422
    ram_blocks = new_block;
2423

    
2424
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2425
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2426
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2427
           0xff, size >> TARGET_PAGE_BITS);
2428

    
2429
    last_ram_offset += size;
2430

    
2431
    if (kvm_enabled())
2432
        kvm_setup_guest_memory(new_block->host, size);
2433

    
2434
    return new_block->offset;
2435
}
2436

    
2437
void qemu_ram_free(ram_addr_t addr)
2438
{
2439
    /* TODO: implement this.  */
2440
}
2441

    
2442
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2443
   With the exception of the softmmu code in this file, this should
2444
   only be used for local memory (e.g. video ram) that the device owns,
2445
   and knows it isn't going to access beyond the end of the block.
2446

2447
   It should not be used for general purpose DMA.
2448
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2449
 */
2450
void *qemu_get_ram_ptr(ram_addr_t addr)
2451
{
2452
    RAMBlock *prev;
2453
    RAMBlock **prevp;
2454
    RAMBlock *block;
2455

    
2456
    prev = NULL;
2457
    prevp = &ram_blocks;
2458
    block = ram_blocks;
2459
    while (block && (block->offset > addr
2460
                     || block->offset + block->length <= addr)) {
2461
        if (prev)
2462
          prevp = &prev->next;
2463
        prev = block;
2464
        block = block->next;
2465
    }
2466
    if (!block) {
2467
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2468
        abort();
2469
    }
2470
    /* Move this entry to the start of the list.  */
2471
    if (prev) {
2472
        prev->next = block->next;
2473
        block->next = *prevp;
2474
        *prevp = block;
2475
    }
2476
    return block->host + (addr - block->offset);
2477
}
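/* Illustrative sketch, under #if 0 as it is only a sketch: the "local memory"
 * use case described above -- a display device allocating its own video RAM
 * and keeping a host pointer to it.  ExampleDisplayState and
 * example_display_init() are hypothetical. */
#if 0
typedef struct ExampleDisplayState {
    ram_addr_t vram_offset;
    uint8_t *vram_ptr;
} ExampleDisplayState;

static void example_display_init(ExampleDisplayState *s, ram_addr_t vram_size)
{
    s->vram_offset = qemu_ram_alloc(vram_size);
    /* Safe here: the device owns this block and never reads past its end. */
    s->vram_ptr = qemu_get_ram_ptr(s->vram_offset);
}
#endif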
2478

    
2479
/* Some of the softmmu routines need to translate from a host pointer
2480
   (typically a TLB entry) back to a ram offset.  */
2481
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2482
{
2483
    RAMBlock *prev;
2484
    RAMBlock **prevp;
2485
    RAMBlock *block;
2486
    uint8_t *host = ptr;
2487

    
2488
    prev = NULL;
2489
    prevp = &ram_blocks;
2490
    block = ram_blocks;
2491
    while (block && (block->host > host
2492
                     || block->host + block->length <= host)) {
2493
        if (prev)
2494
          prevp = &prev->next;
2495
        prev = block;
2496
        block = block->next;
2497
    }
2498
    if (!block) {
2499
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2500
        abort();
2501
    }
2502
    return block->offset + (host - block->host);
2503
}
2504

    
2505
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2506
{
2507
#ifdef DEBUG_UNASSIGNED
2508
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2509
#endif
2510
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2511
    do_unassigned_access(addr, 0, 0, 0, 1);
2512
#endif
2513
    return 0;
2514
}
2515

    
2516
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2517
{
2518
#ifdef DEBUG_UNASSIGNED
2519
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2520
#endif
2521
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2522
    do_unassigned_access(addr, 0, 0, 0, 2);
2523
#endif
2524
    return 0;
2525
}
2526

    
2527
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2528
{
2529
#ifdef DEBUG_UNASSIGNED
2530
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2531
#endif
2532
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2533
    do_unassigned_access(addr, 0, 0, 0, 4);
2534
#endif
2535
    return 0;
2536
}
2537

    
2538
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2539
{
2540
#ifdef DEBUG_UNASSIGNED
2541
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2542
#endif
2543
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2544
    do_unassigned_access(addr, 1, 0, 0, 1);
2545
#endif
2546
}
2547

    
2548
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2549
{
2550
#ifdef DEBUG_UNASSIGNED
2551
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2552
#endif
2553
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2554
    do_unassigned_access(addr, 1, 0, 0, 2);
2555
#endif
2556
}
2557

    
2558
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2559
{
2560
#ifdef DEBUG_UNASSIGNED
2561
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2562
#endif
2563
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2564
    do_unassigned_access(addr, 1, 0, 0, 4);
2565
#endif
2566
}
2567

    
2568
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2569
    unassigned_mem_readb,
2570
    unassigned_mem_readw,
2571
    unassigned_mem_readl,
2572
};
2573

    
2574
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2575
    unassigned_mem_writeb,
2576
    unassigned_mem_writew,
2577
    unassigned_mem_writel,
2578
};
2579

    
2580
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2581
                                uint32_t val)
2582
{
2583
    int dirty_flags;
2584
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2585
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2586
#if !defined(CONFIG_USER_ONLY)
2587
        tb_invalidate_phys_page_fast(ram_addr, 1);
2588
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2589
#endif
2590
    }
2591
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2592
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2593
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2594
    /* we remove the notdirty callback only if the code has been
2595
       flushed */
2596
    if (dirty_flags == 0xff)
2597
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2598
}
2599

    
2600
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2601
                                uint32_t val)
2602
{
2603
    int dirty_flags;
2604
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2605
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2606
#if !defined(CONFIG_USER_ONLY)
2607
        tb_invalidate_phys_page_fast(ram_addr, 2);
2608
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2609
#endif
2610
    }
2611
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2612
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2613
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2614
    /* we remove the notdirty callback only if the code has been
2615
       flushed */
2616
    if (dirty_flags == 0xff)
2617
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2618
}
2619

    
2620
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2621
                                uint32_t val)
2622
{
2623
    int dirty_flags;
2624
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2625
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2626
#if !defined(CONFIG_USER_ONLY)
2627
        tb_invalidate_phys_page_fast(ram_addr, 4);
2628
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2629
#endif
2630
    }
2631
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2632
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2633
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2634
    /* we remove the notdirty callback only if the code has been
2635
       flushed */
2636
    if (dirty_flags == 0xff)
2637
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2638
}
2639

    
2640
static CPUReadMemoryFunc * const error_mem_read[3] = {
2641
    NULL, /* never used */
2642
    NULL, /* never used */
2643
    NULL, /* never used */
2644
};
2645

    
2646
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2647
    notdirty_mem_writeb,
2648
    notdirty_mem_writew,
2649
    notdirty_mem_writel,
2650
};
2651

    
2652
/* Generate a debug exception if a watchpoint has been hit.  */
2653
static void check_watchpoint(int offset, int len_mask, int flags)
2654
{
2655
    CPUState *env = cpu_single_env;
2656
    target_ulong pc, cs_base;
2657
    TranslationBlock *tb;
2658
    target_ulong vaddr;
2659
    CPUWatchpoint *wp;
2660
    int cpu_flags;
2661

    
2662
    if (env->watchpoint_hit) {
2663
        /* We re-entered the check after replacing the TB. Now raise
2664
         * the debug interrupt so that it will trigger after the
2665
         * current instruction. */
2666
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2667
        return;
2668
    }
2669
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2670
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2671
        if ((vaddr == (wp->vaddr & len_mask) ||
2672
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2673
            wp->flags |= BP_WATCHPOINT_HIT;
2674
            if (!env->watchpoint_hit) {
2675
                env->watchpoint_hit = wp;
2676
                tb = tb_find_pc(env->mem_io_pc);
2677
                if (!tb) {
2678
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2679
                              "pc=%p", (void *)env->mem_io_pc);
2680
                }
2681
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2682
                tb_phys_invalidate(tb, -1);
2683
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2684
                    env->exception_index = EXCP_DEBUG;
2685
                } else {
2686
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2687
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2688
                }
2689
                cpu_resume_from_signal(env, NULL);
2690
            }
2691
        } else {
2692
            wp->flags &= ~BP_WATCHPOINT_HIT;
2693
        }
2694
    }
2695
}
2696

    
2697
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2698
   so these check for a hit then pass through to the normal out-of-line
2699
   phys routines.  */
2700
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2701
{
2702
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2703
    return ldub_phys(addr);
2704
}
2705

    
2706
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2707
{
2708
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2709
    return lduw_phys(addr);
2710
}
2711

    
2712
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2713
{
2714
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2715
    return ldl_phys(addr);
2716
}
2717

    
2718
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2719
                             uint32_t val)
2720
{
2721
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2722
    stb_phys(addr, val);
2723
}
2724

    
2725
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2726
                             uint32_t val)
2727
{
2728
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2729
    stw_phys(addr, val);
2730
}
2731

    
2732
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2733
                             uint32_t val)
2734
{
2735
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2736
    stl_phys(addr, val);
2737
}
2738

    
2739
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2740
    watch_mem_readb,
2741
    watch_mem_readw,
2742
    watch_mem_readl,
2743
};
2744

    
2745
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2746
    watch_mem_writeb,
2747
    watch_mem_writew,
2748
    watch_mem_writel,
2749
};
2750

    
2751
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2752
                                 unsigned int len)
2753
{
2754
    uint32_t ret;
2755
    unsigned int idx;
2756

    
2757
    idx = SUBPAGE_IDX(addr);
2758
#if defined(DEBUG_SUBPAGE)
2759
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2760
           mmio, len, addr, idx);
2761
#endif
2762
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2763
                                       addr + mmio->region_offset[idx][0][len]);
2764

    
2765
    return ret;
2766
}
2767

    
2768
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2769
                              uint32_t value, unsigned int len)
2770
{
2771
    unsigned int idx;
2772

    
2773
    idx = SUBPAGE_IDX(addr);
2774
#if defined(DEBUG_SUBPAGE)
2775
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2776
           mmio, len, addr, idx, value);
2777
#endif
2778
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2779
                                  addr + mmio->region_offset[idx][1][len],
2780
                                  value);
2781
}
2782

    
2783
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2784
{
2785
#if defined(DEBUG_SUBPAGE)
2786
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2787
#endif
2788

    
2789
    return subpage_readlen(opaque, addr, 0);
2790
}
2791

    
2792
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2793
                            uint32_t value)
2794
{
2795
#if defined(DEBUG_SUBPAGE)
2796
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2797
#endif
2798
    subpage_writelen(opaque, addr, value, 0);
2799
}
2800

    
2801
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2802
{
2803
#if defined(DEBUG_SUBPAGE)
2804
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2805
#endif
2806

    
2807
    return subpage_readlen(opaque, addr, 1);
2808
}
2809

    
2810
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2811
                            uint32_t value)
2812
{
2813
#if defined(DEBUG_SUBPAGE)
2814
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2815
#endif
2816
    subpage_writelen(opaque, addr, value, 1);
2817
}
2818

    
2819
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2820
{
2821
#if defined(DEBUG_SUBPAGE)
2822
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2823
#endif
2824

    
2825
    return subpage_readlen(opaque, addr, 2);
2826
}
2827

    
2828
static void subpage_writel (void *opaque,
2829
                         target_phys_addr_t addr, uint32_t value)
2830
{
2831
#if defined(DEBUG_SUBPAGE)
2832
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2833
#endif
2834
    subpage_writelen(opaque, addr, value, 2);
2835
}
2836

    
2837
static CPUReadMemoryFunc * const subpage_read[] = {
2838
    &subpage_readb,
2839
    &subpage_readw,
2840
    &subpage_readl,
2841
};
2842

    
2843
static CPUWriteMemoryFunc * const subpage_write[] = {
2844
    &subpage_writeb,
2845
    &subpage_writew,
2846
    &subpage_writel,
2847
};
2848

    
2849
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2850
                             ram_addr_t memory, ram_addr_t region_offset)
2851
{
2852
    int idx, eidx;
2853
    unsigned int i;
2854

    
2855
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2856
        return -1;
2857
    idx = SUBPAGE_IDX(start);
2858
    eidx = SUBPAGE_IDX(end);
2859
#if defined(DEBUG_SUBPAGE)
2860
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2861
           mmio, start, end, idx, eidx, memory);
2862
#endif
2863
    memory >>= IO_MEM_SHIFT;
2864
    for (; idx <= eidx; idx++) {
2865
        for (i = 0; i < 4; i++) {
2866
            if (io_mem_read[memory][i]) {
2867
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2868
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2869
                mmio->region_offset[idx][0][i] = region_offset;
2870
            }
2871
            if (io_mem_write[memory][i]) {
2872
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2873
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2874
                mmio->region_offset[idx][1][i] = region_offset;
2875
            }
2876
        }
2877
    }
2878

    
2879
    return 0;
2880
}
2881

    
2882
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2883
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2884
{
2885
    subpage_t *mmio;
2886
    int subpage_memory;
2887

    
2888
    mmio = qemu_mallocz(sizeof(subpage_t));
2889

    
2890
    mmio->base = base;
2891
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2892
#if defined(DEBUG_SUBPAGE)
2893
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2894
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2895
#endif
2896
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2897
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2898
                         region_offset);
2899

    
2900
    return mmio;
2901
}
2902

    
2903
static int get_free_io_mem_idx(void)
2904
{
2905
    int i;
2906

    
2907
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2908
        if (!io_mem_used[i]) {
2909
            io_mem_used[i] = 1;
2910
            return i;
2911
        }
2912

    
2913
    return -1;
2914
}
2915

    
2916
/* mem_read and mem_write are arrays of functions containing the
2917
   function to access byte (index 0), word (index 1) and dword (index
2918
   2). Functions can be omitted with a NULL function pointer.
2919
   If io_index is non zero, the corresponding io zone is
2920
   modified. If it is zero, a new io zone is allocated. The return
2921
   value can be used with cpu_register_physical_memory(). (-1) is
2922
   returned if error. */
2923
static int cpu_register_io_memory_fixed(int io_index,
2924
                                        CPUReadMemoryFunc * const *mem_read,
2925
                                        CPUWriteMemoryFunc * const *mem_write,
2926
                                        void *opaque)
2927
{
2928
    int i, subwidth = 0;
2929

    
2930
    if (io_index <= 0) {
2931
        io_index = get_free_io_mem_idx();
2932
        if (io_index == -1)
2933
            return io_index;
2934
    } else {
2935
        io_index >>= IO_MEM_SHIFT;
2936
        if (io_index >= IO_MEM_NB_ENTRIES)
2937
            return -1;
2938
    }
2939

    
2940
    for(i = 0;i < 3; i++) {
2941
        if (!mem_read[i] || !mem_write[i])
2942
            subwidth = IO_MEM_SUBWIDTH;
2943
        io_mem_read[io_index][i] = mem_read[i];
2944
        io_mem_write[io_index][i] = mem_write[i];
2945
    }
2946
    io_mem_opaque[io_index] = opaque;
2947
    return (io_index << IO_MEM_SHIFT) | subwidth;
2948
}
2949

    
2950
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2951
                           CPUWriteMemoryFunc * const *mem_write,
2952
                           void *opaque)
2953
{
2954
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2955
}
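/* Illustrative sketch, under #if 0 as it is only a sketch: registering
 * byte/word/dword MMIO handlers for a device and mapping them at a guest
 * physical address.  The example_dev_* handlers and the base address are
 * hypothetical; cpu_register_physical_memory() is assumed to accept the
 * returned io_index as its phys_offset, as the comment above states. */
#if 0
static CPUReadMemoryFunc * const example_dev_read[3] = {
    example_dev_readb, example_dev_readw, example_dev_readl,
};
static CPUWriteMemoryFunc * const example_dev_write[3] = {
    example_dev_writeb, example_dev_writew, example_dev_writel,
};

static void example_dev_map(void *opaque)
{
    int io_index = cpu_register_io_memory(example_dev_read,
                                          example_dev_write, opaque);
    cpu_register_physical_memory(0x10000000, 0x1000, io_index);
}
#endif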
2956

    
2957
void cpu_unregister_io_memory(int io_table_address)
2958
{
2959
    int i;
2960
    int io_index = io_table_address >> IO_MEM_SHIFT;
2961

    
2962
    for (i=0;i < 3; i++) {
2963
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2964
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2965
    }
2966
    io_mem_opaque[io_index] = NULL;
2967
    io_mem_used[io_index] = 0;
2968
}
2969

    
2970
static void io_mem_init(void)
2971
{
2972
    int i;
2973

    
2974
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2975
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2976
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2977
    for (i=0; i<5; i++)
2978
        io_mem_used[i] = 1;
2979

    
2980
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
2981
                                          watch_mem_write, NULL);
2982
}
2983

    
2984
#endif /* !defined(CONFIG_USER_ONLY) */
2985

    
2986
/* physical memory access (slow version, mainly for debug) */
2987
#if defined(CONFIG_USER_ONLY)
2988
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
2989
                            int len, int is_write)
2990
{
2991
    int l, flags;
2992
    target_ulong page;
2993
    void * p;
2994

    
2995
    while (len > 0) {
2996
        page = addr & TARGET_PAGE_MASK;
2997
        l = (page + TARGET_PAGE_SIZE) - addr;
2998
        if (l > len)
2999
            l = len;
3000
        flags = page_get_flags(page);
3001
        if (!(flags & PAGE_VALID))
3002
            return;
3003
        if (is_write) {
3004
            if (!(flags & PAGE_WRITE))
3005
                return;
3006
            /* XXX: this code should not depend on lock_user */
3007
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3008
                /* FIXME - should this return an error rather than just fail? */
3009
                return;
3010
            memcpy(p, buf, l);
3011
            unlock_user(p, addr, l);
3012
        } else {
3013
            if (!(flags & PAGE_READ))
3014
                return;
3015
            /* XXX: this code should not depend on lock_user */
3016
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3017
                /* FIXME - should this return an error rather than just fail? */
3018
                return;
3019
            memcpy(buf, p, l);
3020
            unlock_user(p, addr, 0);
3021
        }
3022
        len -= l;
3023
        buf += l;
3024
        addr += l;
3025
    }
3026
}
3027

    
3028
#else
3029
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3030
                            int len, int is_write)
3031
{
3032
    int l, io_index;
3033
    uint8_t *ptr;
3034
    uint32_t val;
3035
    target_phys_addr_t page;
3036
    unsigned long pd;
3037
    PhysPageDesc *p;
3038

    
3039
    while (len > 0) {
3040
        page = addr & TARGET_PAGE_MASK;
3041
        l = (page + TARGET_PAGE_SIZE) - addr;
3042
        if (l > len)
3043
            l = len;
3044
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3045
        if (!p) {
3046
            pd = IO_MEM_UNASSIGNED;
3047
        } else {
3048
            pd = p->phys_offset;
3049
        }
3050

    
3051
        if (is_write) {
3052
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3053
                target_phys_addr_t addr1 = addr;
3054
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3055
                if (p)
3056
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3057
                /* XXX: could force cpu_single_env to NULL to avoid
3058
                   potential bugs */
3059
                if (l >= 4 && ((addr1 & 3) == 0)) {
3060
                    /* 32 bit write access */
3061
                    val = ldl_p(buf);
3062
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3063
                    l = 4;
3064
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3065
                    /* 16 bit write access */
3066
                    val = lduw_p(buf);
3067
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3068
                    l = 2;
3069
                } else {
3070
                    /* 8 bit write access */
3071
                    val = ldub_p(buf);
3072
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3073
                    l = 1;
3074
                }
3075
            } else {
3076
                unsigned long addr1;
3077
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3078
                /* RAM case */
3079
                ptr = qemu_get_ram_ptr(addr1);
3080
                memcpy(ptr, buf, l);
3081
                if (!cpu_physical_memory_is_dirty(addr1)) {
3082
                    /* invalidate code */
3083
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3084
                    /* set dirty bit */
3085
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3086
                        (0xff & ~CODE_DIRTY_FLAG);
3087
                }
3088
            }
3089
        } else {
3090
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3091
                !(pd & IO_MEM_ROMD)) {
3092
                target_phys_addr_t addr1 = addr;
3093
                /* I/O case */
3094
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3095
                if (p)
3096
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3097
                if (l >= 4 && ((addr1 & 3) == 0)) {
3098
                    /* 32 bit read access */
3099
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3100
                    stl_p(buf, val);
3101
                    l = 4;
3102
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3103
                    /* 16 bit read access */
3104
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3105
                    stw_p(buf, val);
3106
                    l = 2;
3107
                } else {
3108
                    /* 8 bit read access */
3109
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3110
                    stb_p(buf, val);
3111
                    l = 1;
3112
                }
3113
            } else {
3114
                /* RAM case */
3115
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3116
                    (addr & ~TARGET_PAGE_MASK);
3117
                memcpy(buf, ptr, l);
3118
            }
3119
        }
3120
        len -= l;
3121
        buf += l;
3122
        addr += l;
3123
    }
3124
}
3125

    
3126
/* used for ROM loading: can write in RAM and ROM */
3127
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3128
                                   const uint8_t *buf, int len)
3129
{
3130
    int l;
3131
    uint8_t *ptr;
3132
    target_phys_addr_t page;
3133
    unsigned long pd;
3134
    PhysPageDesc *p;
3135

    
3136
    while (len > 0) {
3137
        page = addr & TARGET_PAGE_MASK;
3138
        l = (page + TARGET_PAGE_SIZE) - addr;
3139
        if (l > len)
3140
            l = len;
3141
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3142
        if (!p) {
3143
            pd = IO_MEM_UNASSIGNED;
3144
        } else {
3145
            pd = p->phys_offset;
3146
        }
3147

    
3148
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3149
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3150
            !(pd & IO_MEM_ROMD)) {
3151
            /* do nothing */
3152
        } else {
3153
            unsigned long addr1;
3154
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3155
            /* ROM/RAM case */
3156
            ptr = qemu_get_ram_ptr(addr1);
3157
            memcpy(ptr, buf, l);
3158
        }
3159
        len -= l;
3160
        buf += l;
3161
        addr += l;
3162
    }
3163
}
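/* Illustrative sketch, under #if 0 as it is only a sketch: copying a firmware
 * image into a ROM region during machine setup.  'blob', 'blob_size' and the
 * load address are hypothetical. */
#if 0
static void example_load_firmware(const uint8_t *blob, int blob_size)
{
    cpu_physical_memory_write_rom(0xfffe0000, blob, blob_size);
}
#endif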
3164

    
3165
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

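/* Usage sketch (illustrative only): a DMA-capable device model can use the
   map/unmap pair for zero-copy access to guest RAM, registering a callback
   with cpu_register_map_client() when the single bounce buffer is busy, as
   suggested by the comment above cpu_physical_memory_map().  The helper
   names below are hypothetical. */
#if 0
static void example_dma_retry(void *opaque)
{
    /* hypothetical: re-issue the transfer that could not be mapped */
}

static void example_dma_read_from_guest(target_phys_addr_t addr,
                                        target_phys_addr_t size)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(addr, &plen, 0 /* caller reads */);

    if (!host) {
        /* bounce buffer in use: ask to be notified before retrying */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }
    /* ... consume plen bytes starting at host ... */
    cpu_physical_memory_unmap(host, plen, 0, plen);
}
#endif
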
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

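/* Usage sketch (illustrative only): as noted above stl_phys_notdirty(), a
   target page-table walker can update status bits in a guest PTE without
   flagging the RAM page dirty, so the dirty bitmap keeps tracking only the
   guest's own writes.  The PTE bit and helper below are hypothetical. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20 /* hypothetical "accessed" bit */

static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & EXAMPLE_PTE_ACCESSED)) {
        stl_phys_notdirty(pte_addr, pte | EXAMPLE_PTE_ACCESSED);
    }
}
#endif
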
/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

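/* Usage sketch (illustrative only): a debugger stub can read guest virtual
   memory through cpu_memory_rw_debug(); writes go through
   cpu_physical_memory_write_rom() above, so e.g. breakpoints can be patched
   even into ROM.  The wrapper below is hypothetical. */
#if 0
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int size)
{
    /* returns -1 if any page in the range is unmapped */
    return cpu_memory_rw_debug(env, vaddr, out, size, 0);
}
#endif
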
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

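/* Usage sketch (illustrative only): dump_exec_info() takes any
   fprintf-compatible callback, so the translation statistics can be sent
   straight to a stdio stream.  The wrapper below is hypothetical. */
#if 0
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
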
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif