Statistics
| Branch: | Revision:

root / exec.c @ cca1af8c

History | View | Annotate | Download (110.2 kB)

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36
#include "qemu-common.h"
37
#include "tcg.h"
38
#include "hw/hw.h"
39
#include "osdep.h"
40
#include "kvm.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#include <signal.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
#define TARGET_PHYS_ADDR_SPACE_BITS 32
80
#endif
81

    
82
static TranslationBlock *tbs;
83
int code_gen_max_blocks;
84
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
85
static int nb_tbs;
86
/* any access to the tbs or the page table must use this lock */
87
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
88

    
89
#if defined(__arm__) || defined(__sparc_v9__)
90
/* The prologue must be reachable with a direct jump. ARM and Sparc64
91
 have limited branch ranges (possibly also PPC) so place it in a
92
 section close to code segment. */
93
#define code_gen_section                                \
94
    __attribute__((__section__(".gen_code")))           \
95
    __attribute__((aligned (32)))
96
#elif defined(_WIN32)
97
/* Maximum alignment for Win32 is 16. */
98
#define code_gen_section                                \
99
    __attribute__((aligned (16)))
100
#else
101
#define code_gen_section                                \
102
    __attribute__((aligned (32)))
103
#endif
104

    
105
uint8_t code_gen_prologue[1024] code_gen_section;
106
static uint8_t *code_gen_buffer;
107
static unsigned long code_gen_buffer_size;
108
/* threshold to flush the translated code buffer */
109
static unsigned long code_gen_buffer_max_size;
110
uint8_t *code_gen_ptr;
111

    
112
#if !defined(CONFIG_USER_ONLY)
113
int phys_ram_fd;
114
uint8_t *phys_ram_dirty;
115
static int in_migration;
116

    
117
typedef struct RAMBlock {
118
    uint8_t *host;
119
    ram_addr_t offset;
120
    ram_addr_t length;
121
    struct RAMBlock *next;
122
} RAMBlock;
123

    
124
static RAMBlock *ram_blocks;
125
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
126
   then we can no longer assume contiguous ram offsets, and external uses
127
   of this variable will break.  */
128
ram_addr_t last_ram_offset;
129
#endif
130

    
131
CPUState *first_cpu;
132
/* current CPU in the current thread. It is only valid inside
133
   cpu_exec() */
134
CPUState *cpu_single_env;
135
/* 0 = Do not count executed instructions.
136
   1 = Precise instruction counting.
137
   2 = Adaptive rate instruction counting.  */
138
int use_icount = 0;
139
/* Current instruction counter.  While executing translated code this may
140
   include some instructions that have not yet been executed.  */
141
int64_t qemu_icount;
142

    
143
typedef struct PageDesc {
144
    /* list of TBs intersecting this ram page */
145
    TranslationBlock *first_tb;
146
    /* in order to optimize self modifying code, we count the number
147
       of lookups we do to a given page to use a bitmap */
148
    unsigned int code_write_count;
149
    uint8_t *code_bitmap;
150
#if defined(CONFIG_USER_ONLY)
151
    unsigned long flags;
152
#endif
153
} PageDesc;
154

    
155
typedef struct PhysPageDesc {
156
    /* offset in host memory of the page + io_index in the low bits */
157
    ram_addr_t phys_offset;
158
    ram_addr_t region_offset;
159
} PhysPageDesc;
160

    
161
#define L2_BITS 10
162
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
163
/* XXX: this is a temporary hack for alpha target.
164
 *      In the future, this is to be replaced by a multi-level table
165
 *      to actually be able to handle the complete 64 bits address space.
166
 */
167
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
168
#else
169
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
170
#endif
171

    
172
#define L1_SIZE (1 << L1_BITS)
173
#define L2_SIZE (1 << L2_BITS)
174

    
175
unsigned long qemu_real_host_page_size;
176
unsigned long qemu_host_page_bits;
177
unsigned long qemu_host_page_size;
178
unsigned long qemu_host_page_mask;
179

    
180
/* XXX: for system emulation, it could just be an array */
181
static PageDesc *l1_map[L1_SIZE];
182
static PhysPageDesc **l1_phys_map;
183

    
184
#if !defined(CONFIG_USER_ONLY)
185
static void io_mem_init(void);
186

    
187
/* io memory support */
188
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
189
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
190
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
191
static char io_mem_used[IO_MEM_NB_ENTRIES];
192
static int io_mem_watch;
193
#endif
194

    
195
/* log support */
196
#ifdef WIN32
197
static const char *logfilename = "qemu.log";
198
#else
199
static const char *logfilename = "/tmp/qemu.log";
200
#endif
201
FILE *logfile;
202
int loglevel;
203
static int log_append = 0;
204

    
205
/* statistics */
206
static int tlb_flush_count;
207
static int tb_flush_count;
208
static int tb_phys_invalidate_count;
209

    
210
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
211
typedef struct subpage_t {
212
    target_phys_addr_t base;
213
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
214
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
215
    void *opaque[TARGET_PAGE_SIZE][2][4];
216
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
217
} subpage_t;
218

    
219
#ifdef _WIN32
220
static void map_exec(void *addr, long size)
221
{
222
    DWORD old_protect;
223
    VirtualProtect(addr, size,
224
                   PAGE_EXECUTE_READWRITE, &old_protect);
225
    
226
}
227
#else
228
static void map_exec(void *addr, long size)
229
{
230
    unsigned long start, end, page_size;
231
    
232
    page_size = getpagesize();
233
    start = (unsigned long)addr;
234
    start &= ~(page_size - 1);
235
    
236
    end = (unsigned long)addr + size;
237
    end += page_size - 1;
238
    end &= ~(page_size - 1);
239
    
240
    mprotect((void *)start, end - start,
241
             PROT_READ | PROT_WRITE | PROT_EXEC);
242
}
243
#endif
244

    
245
static void page_init(void)
246
{
247
    /* NOTE: we can always suppose that qemu_host_page_size >=
248
       TARGET_PAGE_SIZE */
249
#ifdef _WIN32
250
    {
251
        SYSTEM_INFO system_info;
252

    
253
        GetSystemInfo(&system_info);
254
        qemu_real_host_page_size = system_info.dwPageSize;
255
    }
256
#else
257
    qemu_real_host_page_size = getpagesize();
258
#endif
259
    if (qemu_host_page_size == 0)
260
        qemu_host_page_size = qemu_real_host_page_size;
261
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
262
        qemu_host_page_size = TARGET_PAGE_SIZE;
263
    qemu_host_page_bits = 0;
264
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
265
        qemu_host_page_bits++;
266
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
267
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
268
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
269

    
270
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
271
    {
272
        long long startaddr, endaddr;
273
        FILE *f;
274
        int n;
275

    
276
        mmap_lock();
277
        last_brk = (unsigned long)sbrk(0);
278
        f = fopen("/proc/self/maps", "r");
279
        if (f) {
280
            do {
281
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
282
                if (n == 2) {
283
                    startaddr = MIN(startaddr,
284
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
285
                    endaddr = MIN(endaddr,
286
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
287
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
288
                                   TARGET_PAGE_ALIGN(endaddr),
289
                                   PAGE_RESERVED); 
290
                }
291
            } while (!feof(f));
292
            fclose(f);
293
        }
294
        mmap_unlock();
295
    }
296
#endif
297
}
298

    
299
static inline PageDesc **page_l1_map(target_ulong index)
300
{
301
#if TARGET_LONG_BITS > 32
302
    /* Host memory outside guest VM.  For 32-bit targets we have already
303
       excluded high addresses.  */
304
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
305
        return NULL;
306
#endif
307
    return &l1_map[index >> L2_BITS];
308
}
309

    
310
static inline PageDesc *page_find_alloc(target_ulong index)
311
{
312
    PageDesc **lp, *p;
313
    lp = page_l1_map(index);
314
    if (!lp)
315
        return NULL;
316

    
317
    p = *lp;
318
    if (!p) {
319
        /* allocate if not found */
320
#if defined(CONFIG_USER_ONLY)
321
        size_t len = sizeof(PageDesc) * L2_SIZE;
322
        /* Don't use qemu_malloc because it may recurse.  */
323
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
324
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
325
        *lp = p;
326
        if (h2g_valid(p)) {
327
            unsigned long addr = h2g(p);
328
            page_set_flags(addr & TARGET_PAGE_MASK,
329
                           TARGET_PAGE_ALIGN(addr + len),
330
                           PAGE_RESERVED); 
331
        }
332
#else
333
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
334
        *lp = p;
335
#endif
336
    }
337
    return p + (index & (L2_SIZE - 1));
338
}
339

    
340
static inline PageDesc *page_find(target_ulong index)
341
{
342
    PageDesc **lp, *p;
343
    lp = page_l1_map(index);
344
    if (!lp)
345
        return NULL;
346

    
347
    p = *lp;
348
    if (!p) {
349
        return NULL;
350
    }
351
    return p + (index & (L2_SIZE - 1));
352
}
353

    
354
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
355
{
356
    void **lp, **p;
357
    PhysPageDesc *pd;
358

    
359
    p = (void **)l1_phys_map;
360
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
361

    
362
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
364
#endif
365
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
366
    p = *lp;
367
    if (!p) {
368
        /* allocate if not found */
369
        if (!alloc)
370
            return NULL;
371
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
372
        memset(p, 0, sizeof(void *) * L1_SIZE);
373
        *lp = p;
374
    }
375
#endif
376
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
377
    pd = *lp;
378
    if (!pd) {
379
        int i;
380
        /* allocate if not found */
381
        if (!alloc)
382
            return NULL;
383
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
384
        *lp = pd;
385
        for (i = 0; i < L2_SIZE; i++) {
386
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
387
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
388
        }
389
    }
390
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
391
}
392

    
393
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
394
{
395
    return phys_page_find_alloc(index, 0);
396
}
397

    
398
#if !defined(CONFIG_USER_ONLY)
399
static void tlb_protect_code(ram_addr_t ram_addr);
400
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
401
                                    target_ulong vaddr);
402
#define mmap_lock() do { } while(0)
403
#define mmap_unlock() do { } while(0)
404
#endif
405

    
406
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
407

    
408
#if defined(CONFIG_USER_ONLY)
409
/* Currently it is not recommended to allocate big chunks of data in
410
   user mode. It will change when a dedicated libc will be used */
411
#define USE_STATIC_CODE_GEN_BUFFER
412
#endif
413

    
414
#ifdef USE_STATIC_CODE_GEN_BUFFER
415
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
416
#endif
417

    
418
static void code_gen_alloc(unsigned long tb_size)
419
{
420
#ifdef USE_STATIC_CODE_GEN_BUFFER
421
    code_gen_buffer = static_code_gen_buffer;
422
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
423
    map_exec(code_gen_buffer, code_gen_buffer_size);
424
#else
425
    code_gen_buffer_size = tb_size;
426
    if (code_gen_buffer_size == 0) {
427
#if defined(CONFIG_USER_ONLY)
428
        /* in user mode, phys_ram_size is not meaningful */
429
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
430
#else
431
        /* XXX: needs adjustments */
432
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
433
#endif
434
    }
435
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
436
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
437
    /* The code gen buffer location may have constraints depending on
438
       the host cpu and OS */
439
#if defined(__linux__) 
440
    {
441
        int flags;
442
        void *start = NULL;
443

    
444
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
445
#if defined(__x86_64__)
446
        flags |= MAP_32BIT;
447
        /* Cannot map more than that */
448
        if (code_gen_buffer_size > (800 * 1024 * 1024))
449
            code_gen_buffer_size = (800 * 1024 * 1024);
450
#elif defined(__sparc_v9__)
451
        // Map the buffer below 2G, so we can use direct calls and branches
452
        flags |= MAP_FIXED;
453
        start = (void *) 0x60000000UL;
454
        if (code_gen_buffer_size > (512 * 1024 * 1024))
455
            code_gen_buffer_size = (512 * 1024 * 1024);
456
#elif defined(__arm__)
457
        /* Map the buffer below 32M, so we can use direct calls and branches */
458
        flags |= MAP_FIXED;
459
        start = (void *) 0x01000000UL;
460
        if (code_gen_buffer_size > 16 * 1024 * 1024)
461
            code_gen_buffer_size = 16 * 1024 * 1024;
462
#endif
463
        code_gen_buffer = mmap(start, code_gen_buffer_size,
464
                               PROT_WRITE | PROT_READ | PROT_EXEC,
465
                               flags, -1, 0);
466
        if (code_gen_buffer == MAP_FAILED) {
467
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
468
            exit(1);
469
        }
470
    }
471
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
472
    {
473
        int flags;
474
        void *addr = NULL;
475
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
476
#if defined(__x86_64__)
477
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
478
         * 0x40000000 is free */
479
        flags |= MAP_FIXED;
480
        addr = (void *)0x40000000;
481
        /* Cannot map more than that */
482
        if (code_gen_buffer_size > (800 * 1024 * 1024))
483
            code_gen_buffer_size = (800 * 1024 * 1024);
484
#endif
485
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
486
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
487
                               flags, -1, 0);
488
        if (code_gen_buffer == MAP_FAILED) {
489
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
490
            exit(1);
491
        }
492
    }
493
#else
494
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
495
    map_exec(code_gen_buffer, code_gen_buffer_size);
496
#endif
497
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
498
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
499
    code_gen_buffer_max_size = code_gen_buffer_size - 
500
        code_gen_max_block_size();
501
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
502
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
503
}
504

    
505
/* Must be called before using the QEMU cpus. 'tb_size' is the size
506
   (in bytes) allocated to the translation buffer. Zero means default
507
   size. */
508
void cpu_exec_init_all(unsigned long tb_size)
509
{
510
    cpu_gen_init();
511
    code_gen_alloc(tb_size);
512
    code_gen_ptr = code_gen_buffer;
513
    page_init();
514
#if !defined(CONFIG_USER_ONLY)
515
    io_mem_init();
516
#endif
517
}
518

    
519
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
520

    
521
static void cpu_common_pre_save(void *opaque)
522
{
523
    CPUState *env = opaque;
524

    
525
    cpu_synchronize_state(env);
526
}
527

    
528
static int cpu_common_pre_load(void *opaque)
529
{
530
    CPUState *env = opaque;
531

    
532
    cpu_synchronize_state(env);
533
    return 0;
534
}
535

    
536
static int cpu_common_post_load(void *opaque, int version_id)
537
{
538
    CPUState *env = opaque;
539

    
540
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
541
       version_id is increased. */
542
    env->interrupt_request &= ~0x01;
543
    tlb_flush(env, 1);
544

    
545
    return 0;
546
}
547

    
548
static const VMStateDescription vmstate_cpu_common = {
549
    .name = "cpu_common",
550
    .version_id = 1,
551
    .minimum_version_id = 1,
552
    .minimum_version_id_old = 1,
553
    .pre_save = cpu_common_pre_save,
554
    .pre_load = cpu_common_pre_load,
555
    .post_load = cpu_common_post_load,
556
    .fields      = (VMStateField []) {
557
        VMSTATE_UINT32(halted, CPUState),
558
        VMSTATE_UINT32(interrupt_request, CPUState),
559
        VMSTATE_END_OF_LIST()
560
    }
561
};
562
#endif
563

    
564
CPUState *qemu_get_cpu(int cpu)
565
{
566
    CPUState *env = first_cpu;
567

    
568
    while (env) {
569
        if (env->cpu_index == cpu)
570
            break;
571
        env = env->next_cpu;
572
    }
573

    
574
    return env;
575
}
576

    
577
void cpu_exec_init(CPUState *env)
578
{
579
    CPUState **penv;
580
    int cpu_index;
581

    
582
#if defined(CONFIG_USER_ONLY)
583
    cpu_list_lock();
584
#endif
585
    env->next_cpu = NULL;
586
    penv = &first_cpu;
587
    cpu_index = 0;
588
    while (*penv != NULL) {
589
        penv = &(*penv)->next_cpu;
590
        cpu_index++;
591
    }
592
    env->cpu_index = cpu_index;
593
    env->numa_node = 0;
594
    QTAILQ_INIT(&env->breakpoints);
595
    QTAILQ_INIT(&env->watchpoints);
596
    *penv = env;
597
#if defined(CONFIG_USER_ONLY)
598
    cpu_list_unlock();
599
#endif
600
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
601
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
602
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
603
                    cpu_save, cpu_load, env);
604
#endif
605
}
606

    
607
static inline void invalidate_page_bitmap(PageDesc *p)
608
{
609
    if (p->code_bitmap) {
610
        qemu_free(p->code_bitmap);
611
        p->code_bitmap = NULL;
612
    }
613
    p->code_write_count = 0;
614
}
615

    
616
/* set to NULL all the 'first_tb' fields in all PageDescs */
617
static void page_flush_tb(void)
618
{
619
    int i, j;
620
    PageDesc *p;
621

    
622
    for(i = 0; i < L1_SIZE; i++) {
623
        p = l1_map[i];
624
        if (p) {
625
            for(j = 0; j < L2_SIZE; j++) {
626
                p->first_tb = NULL;
627
                invalidate_page_bitmap(p);
628
                p++;
629
            }
630
        }
631
    }
632
}
633

    
634
/* flush all the translation blocks */
635
/* XXX: tb_flush is currently not thread safe */
636
void tb_flush(CPUState *env1)
637
{
638
    CPUState *env;
639
#if defined(DEBUG_FLUSH)
640
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
641
           (unsigned long)(code_gen_ptr - code_gen_buffer),
642
           nb_tbs, nb_tbs > 0 ?
643
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
644
#endif
645
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
646
        cpu_abort(env1, "Internal error: code buffer overflow\n");
647

    
648
    nb_tbs = 0;
649

    
650
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
651
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
652
    }
653

    
654
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
655
    page_flush_tb();
656

    
657
    code_gen_ptr = code_gen_buffer;
658
    /* XXX: flush processor icache at this point if cache flush is
659
       expensive */
660
    tb_flush_count++;
661
}
662

    
663
#ifdef DEBUG_TB_CHECK
664

    
665
static void tb_invalidate_check(target_ulong address)
666
{
667
    TranslationBlock *tb;
668
    int i;
669
    address &= TARGET_PAGE_MASK;
670
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
671
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
672
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
673
                  address >= tb->pc + tb->size)) {
674
                printf("ERROR invalidate: address=" TARGET_FMT_lx
675
                       " PC=%08lx size=%04x\n",
676
                       address, (long)tb->pc, tb->size);
677
            }
678
        }
679
    }
680
}
681

    
682
/* verify that all the pages have correct rights for code */
683
static void tb_page_check(void)
684
{
685
    TranslationBlock *tb;
686
    int i, flags1, flags2;
687

    
688
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
689
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
690
            flags1 = page_get_flags(tb->pc);
691
            flags2 = page_get_flags(tb->pc + tb->size - 1);
692
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
693
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
694
                       (long)tb->pc, tb->size, flags1, flags2);
695
            }
696
        }
697
    }
698
}
699

    
700
#endif
701

    
702
/* invalidate one TB */
703
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
704
                             int next_offset)
705
{
706
    TranslationBlock *tb1;
707
    for(;;) {
708
        tb1 = *ptb;
709
        if (tb1 == tb) {
710
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
711
            break;
712
        }
713
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
714
    }
715
}
716

    
717
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
718
{
719
    TranslationBlock *tb1;
720
    unsigned int n1;
721

    
722
    for(;;) {
723
        tb1 = *ptb;
724
        n1 = (long)tb1 & 3;
725
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
726
        if (tb1 == tb) {
727
            *ptb = tb1->page_next[n1];
728
            break;
729
        }
730
        ptb = &tb1->page_next[n1];
731
    }
732
}
733

    
734
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
735
{
736
    TranslationBlock *tb1, **ptb;
737
    unsigned int n1;
738

    
739
    ptb = &tb->jmp_next[n];
740
    tb1 = *ptb;
741
    if (tb1) {
742
        /* find tb(n) in circular list */
743
        for(;;) {
744
            tb1 = *ptb;
745
            n1 = (long)tb1 & 3;
746
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
747
            if (n1 == n && tb1 == tb)
748
                break;
749
            if (n1 == 2) {
750
                ptb = &tb1->jmp_first;
751
            } else {
752
                ptb = &tb1->jmp_next[n1];
753
            }
754
        }
755
        /* now we can suppress tb(n) from the list */
756
        *ptb = tb->jmp_next[n];
757

    
758
        tb->jmp_next[n] = NULL;
759
    }
760
}
761

    
762
/* reset the jump entry 'n' of a TB so that it is not chained to
763
   another TB */
764
static inline void tb_reset_jump(TranslationBlock *tb, int n)
765
{
766
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
767
}
768

    
769
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
770
{
771
    CPUState *env;
772
    PageDesc *p;
773
    unsigned int h, n1;
774
    target_phys_addr_t phys_pc;
775
    TranslationBlock *tb1, *tb2;
776

    
777
    /* remove the TB from the hash list */
778
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
779
    h = tb_phys_hash_func(phys_pc);
780
    tb_remove(&tb_phys_hash[h], tb,
781
              offsetof(TranslationBlock, phys_hash_next));
782

    
783
    /* remove the TB from the page list */
784
    if (tb->page_addr[0] != page_addr) {
785
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
786
        tb_page_remove(&p->first_tb, tb);
787
        invalidate_page_bitmap(p);
788
    }
789
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
790
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
791
        tb_page_remove(&p->first_tb, tb);
792
        invalidate_page_bitmap(p);
793
    }
794

    
795
    tb_invalidated_flag = 1;
796

    
797
    /* remove the TB from the hash list */
798
    h = tb_jmp_cache_hash_func(tb->pc);
799
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
800
        if (env->tb_jmp_cache[h] == tb)
801
            env->tb_jmp_cache[h] = NULL;
802
    }
803

    
804
    /* suppress this TB from the two jump lists */
805
    tb_jmp_remove(tb, 0);
806
    tb_jmp_remove(tb, 1);
807

    
808
    /* suppress any remaining jumps to this TB */
809
    tb1 = tb->jmp_first;
810
    for(;;) {
811
        n1 = (long)tb1 & 3;
812
        if (n1 == 2)
813
            break;
814
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
815
        tb2 = tb1->jmp_next[n1];
816
        tb_reset_jump(tb1, n1);
817
        tb1->jmp_next[n1] = NULL;
818
        tb1 = tb2;
819
    }
820
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
821

    
822
    tb_phys_invalidate_count++;
823
}
824

    
825
static inline void set_bits(uint8_t *tab, int start, int len)
826
{
827
    int end, mask, end1;
828

    
829
    end = start + len;
830
    tab += start >> 3;
831
    mask = 0xff << (start & 7);
832
    if ((start & ~7) == (end & ~7)) {
833
        if (start < end) {
834
            mask &= ~(0xff << (end & 7));
835
            *tab |= mask;
836
        }
837
    } else {
838
        *tab++ |= mask;
839
        start = (start + 8) & ~7;
840
        end1 = end & ~7;
841
        while (start < end1) {
842
            *tab++ = 0xff;
843
            start += 8;
844
        }
845
        if (start < end) {
846
            mask = ~(0xff << (end & 7));
847
            *tab |= mask;
848
        }
849
    }
850
}
851

    
852
static void build_page_bitmap(PageDesc *p)
853
{
854
    int n, tb_start, tb_end;
855
    TranslationBlock *tb;
856

    
857
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
858

    
859
    tb = p->first_tb;
860
    while (tb != NULL) {
861
        n = (long)tb & 3;
862
        tb = (TranslationBlock *)((long)tb & ~3);
863
        /* NOTE: this is subtle as a TB may span two physical pages */
864
        if (n == 0) {
865
            /* NOTE: tb_end may be after the end of the page, but
866
               it is not a problem */
867
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
868
            tb_end = tb_start + tb->size;
869
            if (tb_end > TARGET_PAGE_SIZE)
870
                tb_end = TARGET_PAGE_SIZE;
871
        } else {
872
            tb_start = 0;
873
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
874
        }
875
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
876
        tb = tb->page_next[n];
877
    }
878
}
879

    
880
TranslationBlock *tb_gen_code(CPUState *env,
881
                              target_ulong pc, target_ulong cs_base,
882
                              int flags, int cflags)
883
{
884
    TranslationBlock *tb;
885
    uint8_t *tc_ptr;
886
    target_ulong phys_pc, phys_page2, virt_page2;
887
    int code_gen_size;
888

    
889
    phys_pc = get_phys_addr_code(env, pc);
890
    tb = tb_alloc(pc);
891
    if (!tb) {
892
        /* flush must be done */
893
        tb_flush(env);
894
        /* cannot fail at this point */
895
        tb = tb_alloc(pc);
896
        /* Don't forget to invalidate previous TB info.  */
897
        tb_invalidated_flag = 1;
898
    }
899
    tc_ptr = code_gen_ptr;
900
    tb->tc_ptr = tc_ptr;
901
    tb->cs_base = cs_base;
902
    tb->flags = flags;
903
    tb->cflags = cflags;
904
    cpu_gen_code(env, tb, &code_gen_size);
905
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
906

    
907
    /* check next page if needed */
908
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
909
    phys_page2 = -1;
910
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
911
        phys_page2 = get_phys_addr_code(env, virt_page2);
912
    }
913
    tb_link_phys(tb, phys_pc, phys_page2);
914
    return tb;
915
}
916

    
917
/* invalidate all TBs which intersect with the target physical page
918
   starting in range [start;end[. NOTE: start and end must refer to
919
   the same physical page. 'is_cpu_write_access' should be true if called
920
   from a real cpu write access: the virtual CPU will exit the current
921
   TB if code is modified inside this TB. */
922
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
923
                                   int is_cpu_write_access)
924
{
925
    TranslationBlock *tb, *tb_next, *saved_tb;
926
    CPUState *env = cpu_single_env;
927
    target_ulong tb_start, tb_end;
928
    PageDesc *p;
929
    int n;
930
#ifdef TARGET_HAS_PRECISE_SMC
931
    int current_tb_not_found = is_cpu_write_access;
932
    TranslationBlock *current_tb = NULL;
933
    int current_tb_modified = 0;
934
    target_ulong current_pc = 0;
935
    target_ulong current_cs_base = 0;
936
    int current_flags = 0;
937
#endif /* TARGET_HAS_PRECISE_SMC */
938

    
939
    p = page_find(start >> TARGET_PAGE_BITS);
940
    if (!p)
941
        return;
942
    if (!p->code_bitmap &&
943
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
944
        is_cpu_write_access) {
945
        /* build code bitmap */
946
        build_page_bitmap(p);
947
    }
948

    
949
    /* we remove all the TBs in the range [start, end[ */
950
    /* XXX: see if in some cases it could be faster to invalidate all the code */
951
    tb = p->first_tb;
952
    while (tb != NULL) {
953
        n = (long)tb & 3;
954
        tb = (TranslationBlock *)((long)tb & ~3);
955
        tb_next = tb->page_next[n];
956
        /* NOTE: this is subtle as a TB may span two physical pages */
957
        if (n == 0) {
958
            /* NOTE: tb_end may be after the end of the page, but
959
               it is not a problem */
960
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
961
            tb_end = tb_start + tb->size;
962
        } else {
963
            tb_start = tb->page_addr[1];
964
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
965
        }
966
        if (!(tb_end <= start || tb_start >= end)) {
967
#ifdef TARGET_HAS_PRECISE_SMC
968
            if (current_tb_not_found) {
969
                current_tb_not_found = 0;
970
                current_tb = NULL;
971
                if (env->mem_io_pc) {
972
                    /* now we have a real cpu fault */
973
                    current_tb = tb_find_pc(env->mem_io_pc);
974
                }
975
            }
976
            if (current_tb == tb &&
977
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
978
                /* If we are modifying the current TB, we must stop
979
                its execution. We could be more precise by checking
980
                that the modification is after the current PC, but it
981
                would require a specialized function to partially
982
                restore the CPU state */
983

    
984
                current_tb_modified = 1;
985
                cpu_restore_state(current_tb, env,
986
                                  env->mem_io_pc, NULL);
987
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
988
                                     &current_flags);
989
            }
990
#endif /* TARGET_HAS_PRECISE_SMC */
991
            /* we need to do that to handle the case where a signal
992
               occurs while doing tb_phys_invalidate() */
993
            saved_tb = NULL;
994
            if (env) {
995
                saved_tb = env->current_tb;
996
                env->current_tb = NULL;
997
            }
998
            tb_phys_invalidate(tb, -1);
999
            if (env) {
1000
                env->current_tb = saved_tb;
1001
                if (env->interrupt_request && env->current_tb)
1002
                    cpu_interrupt(env, env->interrupt_request);
1003
            }
1004
        }
1005
        tb = tb_next;
1006
    }
1007
#if !defined(CONFIG_USER_ONLY)
1008
    /* if no code remaining, no need to continue to use slow writes */
1009
    if (!p->first_tb) {
1010
        invalidate_page_bitmap(p);
1011
        if (is_cpu_write_access) {
1012
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1013
        }
1014
    }
1015
#endif
1016
#ifdef TARGET_HAS_PRECISE_SMC
1017
    if (current_tb_modified) {
1018
        /* we generate a block containing just the instruction
1019
           modifying the memory. It will ensure that it cannot modify
1020
           itself */
1021
        env->current_tb = NULL;
1022
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1023
        cpu_resume_from_signal(env, NULL);
1024
    }
1025
#endif
1026
}
1027

    
1028
/* len must be <= 8 and start must be a multiple of len */
1029
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1030
{
1031
    PageDesc *p;
1032
    int offset, b;
1033
#if 0
1034
    if (1) {
1035
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1036
                  cpu_single_env->mem_io_vaddr, len,
1037
                  cpu_single_env->eip,
1038
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1039
    }
1040
#endif
1041
    p = page_find(start >> TARGET_PAGE_BITS);
1042
    if (!p)
1043
        return;
1044
    if (p->code_bitmap) {
1045
        offset = start & ~TARGET_PAGE_MASK;
1046
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1047
        if (b & ((1 << len) - 1))
1048
            goto do_invalidate;
1049
    } else {
1050
    do_invalidate:
1051
        tb_invalidate_phys_page_range(start, start + len, 1);
1052
    }
1053
}
1054

    
1055
#if !defined(CONFIG_SOFTMMU)
1056
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1057
                                    unsigned long pc, void *puc)
1058
{
1059
    TranslationBlock *tb;
1060
    PageDesc *p;
1061
    int n;
1062
#ifdef TARGET_HAS_PRECISE_SMC
1063
    TranslationBlock *current_tb = NULL;
1064
    CPUState *env = cpu_single_env;
1065
    int current_tb_modified = 0;
1066
    target_ulong current_pc = 0;
1067
    target_ulong current_cs_base = 0;
1068
    int current_flags = 0;
1069
#endif
1070

    
1071
    addr &= TARGET_PAGE_MASK;
1072
    p = page_find(addr >> TARGET_PAGE_BITS);
1073
    if (!p)
1074
        return;
1075
    tb = p->first_tb;
1076
#ifdef TARGET_HAS_PRECISE_SMC
1077
    if (tb && pc != 0) {
1078
        current_tb = tb_find_pc(pc);
1079
    }
1080
#endif
1081
    while (tb != NULL) {
1082
        n = (long)tb & 3;
1083
        tb = (TranslationBlock *)((long)tb & ~3);
1084
#ifdef TARGET_HAS_PRECISE_SMC
1085
        if (current_tb == tb &&
1086
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1087
                /* If we are modifying the current TB, we must stop
1088
                   its execution. We could be more precise by checking
1089
                   that the modification is after the current PC, but it
1090
                   would require a specialized function to partially
1091
                   restore the CPU state */
1092

    
1093
            current_tb_modified = 1;
1094
            cpu_restore_state(current_tb, env, pc, puc);
1095
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1096
                                 &current_flags);
1097
        }
1098
#endif /* TARGET_HAS_PRECISE_SMC */
1099
        tb_phys_invalidate(tb, addr);
1100
        tb = tb->page_next[n];
1101
    }
1102
    p->first_tb = NULL;
1103
#ifdef TARGET_HAS_PRECISE_SMC
1104
    if (current_tb_modified) {
1105
        /* we generate a block containing just the instruction
1106
           modifying the memory. It will ensure that it cannot modify
1107
           itself */
1108
        env->current_tb = NULL;
1109
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1110
        cpu_resume_from_signal(env, puc);
1111
    }
1112
#endif
1113
}
1114
#endif
1115

    
1116
/* add the tb in the target page and protect it if necessary */
1117
static inline void tb_alloc_page(TranslationBlock *tb,
1118
                                 unsigned int n, target_ulong page_addr)
1119
{
1120
    PageDesc *p;
1121
    TranslationBlock *last_first_tb;
1122

    
1123
    tb->page_addr[n] = page_addr;
1124
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1125
    tb->page_next[n] = p->first_tb;
1126
    last_first_tb = p->first_tb;
1127
    p->first_tb = (TranslationBlock *)((long)tb | n);
1128
    invalidate_page_bitmap(p);
1129

    
1130
#if defined(TARGET_HAS_SMC) || 1
1131

    
1132
#if defined(CONFIG_USER_ONLY)
1133
    if (p->flags & PAGE_WRITE) {
1134
        target_ulong addr;
1135
        PageDesc *p2;
1136
        int prot;
1137

    
1138
        /* force the host page as non writable (writes will have a
1139
           page fault + mprotect overhead) */
1140
        page_addr &= qemu_host_page_mask;
1141
        prot = 0;
1142
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1143
            addr += TARGET_PAGE_SIZE) {
1144

    
1145
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1146
            if (!p2)
1147
                continue;
1148
            prot |= p2->flags;
1149
            p2->flags &= ~PAGE_WRITE;
1150
            page_get_flags(addr);
1151
          }
1152
        mprotect(g2h(page_addr), qemu_host_page_size,
1153
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1154
#ifdef DEBUG_TB_INVALIDATE
1155
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1156
               page_addr);
1157
#endif
1158
    }
1159
#else
1160
    /* if some code is already present, then the pages are already
1161
       protected. So we handle the case where only the first TB is
1162
       allocated in a physical page */
1163
    if (!last_first_tb) {
1164
        tlb_protect_code(page_addr);
1165
    }
1166
#endif
1167

    
1168
#endif /* TARGET_HAS_SMC */
1169
}
1170

    
1171
/* Allocate a new translation block. Flush the translation buffer if
1172
   too many translation blocks or too much generated code. */
1173
TranslationBlock *tb_alloc(target_ulong pc)
1174
{
1175
    TranslationBlock *tb;
1176

    
1177
    if (nb_tbs >= code_gen_max_blocks ||
1178
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1179
        return NULL;
1180
    tb = &tbs[nb_tbs++];
1181
    tb->pc = pc;
1182
    tb->cflags = 0;
1183
    return tb;
1184
}
1185

    
1186
void tb_free(TranslationBlock *tb)
1187
{
1188
    /* In practice this is mostly used for single use temporary TB
1189
       Ignore the hard cases and just back up if this TB happens to
1190
       be the last one generated.  */
1191
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1192
        code_gen_ptr = tb->tc_ptr;
1193
        nb_tbs--;
1194
    }
1195
}
1196

    
1197
/* add a new TB and link it to the physical page tables. phys_page2 is
1198
   (-1) to indicate that only one page contains the TB. */
1199
void tb_link_phys(TranslationBlock *tb,
1200
                  target_ulong phys_pc, target_ulong phys_page2)
1201
{
1202
    unsigned int h;
1203
    TranslationBlock **ptb;
1204

    
1205
    /* Grab the mmap lock to stop another thread invalidating this TB
1206
       before we are done.  */
1207
    mmap_lock();
1208
    /* add in the physical hash table */
1209
    h = tb_phys_hash_func(phys_pc);
1210
    ptb = &tb_phys_hash[h];
1211
    tb->phys_hash_next = *ptb;
1212
    *ptb = tb;
1213

    
1214
    /* add in the page list */
1215
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1216
    if (phys_page2 != -1)
1217
        tb_alloc_page(tb, 1, phys_page2);
1218
    else
1219
        tb->page_addr[1] = -1;
1220

    
1221
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1222
    tb->jmp_next[0] = NULL;
1223
    tb->jmp_next[1] = NULL;
1224

    
1225
    /* init original jump addresses */
1226
    if (tb->tb_next_offset[0] != 0xffff)
1227
        tb_reset_jump(tb, 0);
1228
    if (tb->tb_next_offset[1] != 0xffff)
1229
        tb_reset_jump(tb, 1);
1230

    
1231
#ifdef DEBUG_TB_CHECK
1232
    tb_page_check();
1233
#endif
1234
    mmap_unlock();
1235
}
1236

    
1237
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1238
   tb[1].tc_ptr. Return NULL if not found */
1239
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1240
{
1241
    int m_min, m_max, m;
1242
    unsigned long v;
1243
    TranslationBlock *tb;
1244

    
1245
    if (nb_tbs <= 0)
1246
        return NULL;
1247
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1248
        tc_ptr >= (unsigned long)code_gen_ptr)
1249
        return NULL;
1250
    /* binary search (cf Knuth) */
1251
    m_min = 0;
1252
    m_max = nb_tbs - 1;
1253
    while (m_min <= m_max) {
1254
        m = (m_min + m_max) >> 1;
1255
        tb = &tbs[m];
1256
        v = (unsigned long)tb->tc_ptr;
1257
        if (v == tc_ptr)
1258
            return tb;
1259
        else if (tc_ptr < v) {
1260
            m_max = m - 1;
1261
        } else {
1262
            m_min = m + 1;
1263
        }
1264
    }
1265
    return &tbs[m_max];
1266
}
1267

    
1268
static void tb_reset_jump_recursive(TranslationBlock *tb);
1269

    
1270
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1271
{
1272
    TranslationBlock *tb1, *tb_next, **ptb;
1273
    unsigned int n1;
1274

    
1275
    tb1 = tb->jmp_next[n];
1276
    if (tb1 != NULL) {
1277
        /* find head of list */
1278
        for(;;) {
1279
            n1 = (long)tb1 & 3;
1280
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1281
            if (n1 == 2)
1282
                break;
1283
            tb1 = tb1->jmp_next[n1];
1284
        }
1285
        /* we are now sure now that tb jumps to tb1 */
1286
        tb_next = tb1;
1287

    
1288
        /* remove tb from the jmp_first list */
1289
        ptb = &tb_next->jmp_first;
1290
        for(;;) {
1291
            tb1 = *ptb;
1292
            n1 = (long)tb1 & 3;
1293
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1294
            if (n1 == n && tb1 == tb)
1295
                break;
1296
            ptb = &tb1->jmp_next[n1];
1297
        }
1298
        *ptb = tb->jmp_next[n];
1299
        tb->jmp_next[n] = NULL;
1300

    
1301
        /* suppress the jump to next tb in generated code */
1302
        tb_reset_jump(tb, n);
1303

    
1304
        /* suppress jumps in the tb on which we could have jumped */
1305
        tb_reset_jump_recursive(tb_next);
1306
    }
1307
}
1308

    
1309
static void tb_reset_jump_recursive(TranslationBlock *tb)
1310
{
1311
    tb_reset_jump_recursive2(tb, 0);
1312
    tb_reset_jump_recursive2(tb, 1);
1313
}
1314

    
1315
#if defined(TARGET_HAS_ICE)
1316
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1317
{
1318
    target_phys_addr_t addr;
1319
    target_ulong pd;
1320
    ram_addr_t ram_addr;
1321
    PhysPageDesc *p;
1322

    
1323
    addr = cpu_get_phys_page_debug(env, pc);
1324
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1325
    if (!p) {
1326
        pd = IO_MEM_UNASSIGNED;
1327
    } else {
1328
        pd = p->phys_offset;
1329
    }
1330
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1331
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1332
}
1333
#endif
1334

    
1335
/* Add a watchpoint.  */
1336
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1337
                          int flags, CPUWatchpoint **watchpoint)
1338
{
1339
    target_ulong len_mask = ~(len - 1);
1340
    CPUWatchpoint *wp;
1341

    
1342
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1343
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1344
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1345
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1346
        return -EINVAL;
1347
    }
1348
    wp = qemu_malloc(sizeof(*wp));
1349

    
1350
    wp->vaddr = addr;
1351
    wp->len_mask = len_mask;
1352
    wp->flags = flags;
1353

    
1354
    /* keep all GDB-injected watchpoints in front */
1355
    if (flags & BP_GDB)
1356
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1357
    else
1358
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1359

    
1360
    tlb_flush_page(env, addr);
1361

    
1362
    if (watchpoint)
1363
        *watchpoint = wp;
1364
    return 0;
1365
}
1366

    
1367
/* Remove a specific watchpoint.  */
1368
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1369
                          int flags)
1370
{
1371
    target_ulong len_mask = ~(len - 1);
1372
    CPUWatchpoint *wp;
1373

    
1374
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1375
        if (addr == wp->vaddr && len_mask == wp->len_mask
1376
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1377
            cpu_watchpoint_remove_by_ref(env, wp);
1378
            return 0;
1379
        }
1380
    }
1381
    return -ENOENT;
1382
}
1383

    
1384
/* Remove a specific watchpoint by reference.  */
1385
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1386
{
1387
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1388

    
1389
    tlb_flush_page(env, watchpoint->vaddr);
1390

    
1391
    qemu_free(watchpoint);
1392
}
1393

    
1394
/* Remove all matching watchpoints.  */
1395
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1396
{
1397
    CPUWatchpoint *wp, *next;
1398

    
1399
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1400
        if (wp->flags & mask)
1401
            cpu_watchpoint_remove_by_ref(env, wp);
1402
    }
1403
}
1404

    
1405
/* Add a breakpoint.  */
1406
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1407
                          CPUBreakpoint **breakpoint)
1408
{
1409
#if defined(TARGET_HAS_ICE)
1410
    CPUBreakpoint *bp;
1411

    
1412
    bp = qemu_malloc(sizeof(*bp));
1413

    
1414
    bp->pc = pc;
1415
    bp->flags = flags;
1416

    
1417
    /* keep all GDB-injected breakpoints in front */
1418
    if (flags & BP_GDB)
1419
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1420
    else
1421
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1422

    
1423
    breakpoint_invalidate(env, pc);
1424

    
1425
    if (breakpoint)
1426
        *breakpoint = bp;
1427
    return 0;
1428
#else
1429
    return -ENOSYS;
1430
#endif
1431
}
1432

    
1433
/* Remove a specific breakpoint.  */
1434
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1435
{
1436
#if defined(TARGET_HAS_ICE)
1437
    CPUBreakpoint *bp;
1438

    
1439
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1440
        if (bp->pc == pc && bp->flags == flags) {
1441
            cpu_breakpoint_remove_by_ref(env, bp);
1442
            return 0;
1443
        }
1444
    }
1445
    return -ENOENT;
1446
#else
1447
    return -ENOSYS;
1448
#endif
1449
}
1450

    
1451
/* Remove a specific breakpoint by reference.  */
1452
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1453
{
1454
#if defined(TARGET_HAS_ICE)
1455
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1456

    
1457
    breakpoint_invalidate(env, breakpoint->pc);
1458

    
1459
    qemu_free(breakpoint);
1460
#endif
1461
}
1462

    
1463
/* Remove all matching breakpoints. */
1464
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1465
{
1466
#if defined(TARGET_HAS_ICE)
1467
    CPUBreakpoint *bp, *next;
1468

    
1469
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1470
        if (bp->flags & mask)
1471
            cpu_breakpoint_remove_by_ref(env, bp);
1472
    }
1473
#endif
1474
}
1475

    
1476
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1477
   CPU loop after each instruction */
1478
void cpu_single_step(CPUState *env, int enabled)
1479
{
1480
#if defined(TARGET_HAS_ICE)
1481
    if (env->singlestep_enabled != enabled) {
1482
        env->singlestep_enabled = enabled;
1483
        if (kvm_enabled())
1484
            kvm_update_guest_debug(env, 0);
1485
        else {
1486
            /* must flush all the translated code to avoid inconsistencies */
1487
            /* XXX: only flush what is necessary */
1488
            tb_flush(env);
1489
        }
1490
    }
1491
#endif
1492
}
1493

    
1494
/* enable or disable low levels log */
1495
void cpu_set_log(int log_flags)
1496
{
1497
    loglevel = log_flags;
1498
    if (loglevel && !logfile) {
1499
        logfile = fopen(logfilename, log_append ? "a" : "w");
1500
        if (!logfile) {
1501
            perror(logfilename);
1502
            _exit(1);
1503
        }
1504
#if !defined(CONFIG_SOFTMMU)
1505
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1506
        {
1507
            static char logfile_buf[4096];
1508
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1509
        }
1510
#elif !defined(_WIN32)
1511
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1512
        setvbuf(logfile, NULL, _IOLBF, 0);
1513
#endif
1514
        log_append = 1;
1515
    }
1516
    if (!loglevel && logfile) {
1517
        fclose(logfile);
1518
        logfile = NULL;
1519
    }
1520
}
1521

    
1522
void cpu_set_log_filename(const char *filename)
1523
{
1524
    logfilename = strdup(filename);
1525
    if (logfile) {
1526
        fclose(logfile);
1527
        logfile = NULL;
1528
    }
1529
    cpu_set_log(loglevel);
1530
}
1531

    
1532
static void cpu_unlink_tb(CPUState *env)
1533
{
1534
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1535
       problem and hope the cpu will stop of its own accord.  For userspace
1536
       emulation this often isn't actually as bad as it sounds.  Often
1537
       signals are used primarily to interrupt blocking syscalls.  */
1538
    TranslationBlock *tb;
1539
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1540

    
1541
    spin_lock(&interrupt_lock);
1542
    tb = env->current_tb;
1543
    /* if the cpu is currently executing code, we must unlink it and
1544
       all the potentially executing TB */
1545
    if (tb) {
1546
        env->current_tb = NULL;
1547
        tb_reset_jump_recursive(tb);
1548
    }
1549
    spin_unlock(&interrupt_lock);
1550
}
1551

    
1552
/* mask must never be zero, except for A20 change call */
1553
void cpu_interrupt(CPUState *env, int mask)
1554
{
1555
    int old_mask;
1556

    
1557
    old_mask = env->interrupt_request;
1558
    env->interrupt_request |= mask;
1559

    
1560
#ifndef CONFIG_USER_ONLY
1561
    /*
1562
     * If called from iothread context, wake the target cpu in
1563
     * case its halted.
1564
     */
1565
    if (!qemu_cpu_self(env)) {
1566
        qemu_cpu_kick(env);
1567
        return;
1568
    }
1569
#endif
1570

    
1571
    if (use_icount) {
1572
        env->icount_decr.u16.high = 0xffff;
1573
#ifndef CONFIG_USER_ONLY
1574
        if (!can_do_io(env)
1575
            && (mask & ~old_mask) != 0) {
1576
            cpu_abort(env, "Raised interrupt while not in I/O function");
1577
        }
1578
#endif
1579
    } else {
1580
        cpu_unlink_tb(env);
1581
    }
1582
}
1583

    
1584
void cpu_reset_interrupt(CPUState *env, int mask)
1585
{
1586
    env->interrupt_request &= ~mask;
1587
}
1588

    
1589
void cpu_exit(CPUState *env)
1590
{
1591
    env->exit_request = 1;
1592
    cpu_unlink_tb(env);
1593
}
1594

    
1595
const CPULogItem cpu_log_items[] = {
1596
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1597
      "show generated host assembly code for each compiled TB" },
1598
    { CPU_LOG_TB_IN_ASM, "in_asm",
1599
      "show target assembly code for each compiled TB" },
1600
    { CPU_LOG_TB_OP, "op",
1601
      "show micro ops for each compiled TB" },
1602
    { CPU_LOG_TB_OP_OPT, "op_opt",
1603
      "show micro ops "
1604
#ifdef TARGET_I386
1605
      "before eflags optimization and "
1606
#endif
1607
      "after liveness analysis" },
1608
    { CPU_LOG_INT, "int",
1609
      "show interrupts/exceptions in short format" },
1610
    { CPU_LOG_EXEC, "exec",
1611
      "show trace before each executed TB (lots of logs)" },
1612
    { CPU_LOG_TB_CPU, "cpu",
1613
      "show CPU state before block translation" },
1614
#ifdef TARGET_I386
1615
    { CPU_LOG_PCALL, "pcall",
1616
      "show protected mode far calls/returns/exceptions" },
1617
    { CPU_LOG_RESET, "cpu_reset",
1618
      "show CPU state before CPU resets" },
1619
#endif
1620
#ifdef DEBUG_IOPORT
1621
    { CPU_LOG_IOPORT, "ioport",
1622
      "show all i/o ports accesses" },
1623
#endif
1624
    { 0, NULL, NULL },
1625
};
1626

    
1627
static int cmp1(const char *s1, int n, const char *s2)
1628
{
1629
    if (strlen(s2) != n)
1630
        return 0;
1631
    return memcmp(s1, s2, n) == 0;
1632
}
1633

    
1634
/* takes a comma separated list of log masks. Return 0 if error. */
1635
int cpu_str_to_log_mask(const char *str)
1636
{
1637
    const CPULogItem *item;
1638
    int mask;
1639
    const char *p, *p1;
1640

    
1641
    p = str;
1642
    mask = 0;
1643
    for(;;) {
1644
        p1 = strchr(p, ',');
1645
        if (!p1)
1646
            p1 = p + strlen(p);
1647
        if(cmp1(p,p1-p,"all")) {
1648
                for(item = cpu_log_items; item->mask != 0; item++) {
1649
                        mask |= item->mask;
1650
                }
1651
        } else {
1652
        for(item = cpu_log_items; item->mask != 0; item++) {
1653
            if (cmp1(p, p1 - p, item->name))
1654
                goto found;
1655
        }
1656
        return 0;
1657
        }
1658
    found:
1659
        mask |= item->mask;
1660
        if (*p1 != ',')
1661
            break;
1662
        p = p1 + 1;
1663
    }
1664
    return mask;
1665
}
1666

    
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

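/* Editor's note (not in the original source): the intent above is that the
 * copy owns private break/watchpoint lists rather than aliasing the queue
 * heads duplicated by memcpy(); the lists are therefore (re)initialized and
 * then repopulated through cpu_breakpoint_insert()/cpu_watchpoint_insert().
 */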
#if !defined(CONFIG_USER_ONLY)
1742

    
1743
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1744
{
1745
    unsigned int i;
1746

    
1747
    /* Discard jump cache entries for any tb which might potentially
1748
       overlap the flushed page.  */
1749
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1750
    memset (&env->tb_jmp_cache[i], 0, 
1751
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1752

    
1753
    i = tb_jmp_cache_hash_page(addr);
1754
    memset (&env->tb_jmp_cache[i], 0, 
1755
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1756
}
1757

    
1758
static CPUTLBEntry s_cputlb_empty_entry = {
1759
    .addr_read  = -1,
1760
    .addr_write = -1,
1761
    .addr_code  = -1,
1762
    .addend     = -1,
1763
};
1764

    
1765
/* NOTE: if flush_global is true, also flush global entries (not
1766
   implemented yet) */
1767
void tlb_flush(CPUState *env, int flush_global)
1768
{
1769
    int i;
1770

    
1771
#if defined(DEBUG_TLB)
1772
    printf("tlb_flush:\n");
1773
#endif
1774
    /* must reset current TB so that interrupts cannot modify the
1775
       links while we are modifying them */
1776
    env->current_tb = NULL;
1777

    
1778
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1779
        int mmu_idx;
1780
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1781
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1782
        }
1783
    }
1784

    
1785
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1786

    
1787
    tlb_flush_count++;
1788
}
1789

    
1790
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1791
{
1792
    if (addr == (tlb_entry->addr_read &
1793
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1794
        addr == (tlb_entry->addr_write &
1795
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1796
        addr == (tlb_entry->addr_code &
1797
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1798
        *tlb_entry = s_cputlb_empty_entry;
1799
    }
1800
}
1801

    
1802
void tlb_flush_page(CPUState *env, target_ulong addr)
1803
{
1804
    int i;
1805
    int mmu_idx;
1806

    
1807
#if defined(DEBUG_TLB)
1808
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1809
#endif
1810
    /* must reset current TB so that interrupts cannot modify the
1811
       links while we are modifying them */
1812
    env->current_tb = NULL;
1813

    
1814
    addr &= TARGET_PAGE_MASK;
1815
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1816
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1817
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1818

    
1819
    tlb_flush_jmp_cache(env, addr);
1820
}
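/* Editor's note (not in the original source): the software TLB is a
 * direct-mapped cache, which is why tlb_flush_page() only has to clear one
 * slot per MMU mode.  The slot for a virtual address is
 *
 *     (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)
 *
 * exactly the index computed above before calling tlb_flush_entry().
 */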
1821

    
1822
/* update the TLBs so that writes to code in the virtual page 'addr'
1823
   can be detected */
1824
static void tlb_protect_code(ram_addr_t ram_addr)
1825
{
1826
    cpu_physical_memory_reset_dirty(ram_addr,
1827
                                    ram_addr + TARGET_PAGE_SIZE,
1828
                                    CODE_DIRTY_FLAG);
1829
}
1830

    
1831
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1832
   tested for self modifying code */
1833
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1834
                                    target_ulong vaddr)
1835
{
1836
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1837
}
1838

    
1839
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1840
                                         unsigned long start, unsigned long length)
1841
{
1842
    unsigned long addr;
1843
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1844
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1845
        if ((addr - start) < length) {
1846
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1847
        }
1848
    }
1849
}
1850

    
1851
/* Note: start and end must be within the same ram block.  */
1852
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1853
                                     int dirty_flags)
1854
{
1855
    CPUState *env;
1856
    unsigned long length, start1;
1857
    int i, mask, len;
1858
    uint8_t *p;
1859

    
1860
    start &= TARGET_PAGE_MASK;
1861
    end = TARGET_PAGE_ALIGN(end);
1862

    
1863
    length = end - start;
1864
    if (length == 0)
1865
        return;
1866
    len = length >> TARGET_PAGE_BITS;
1867
    mask = ~dirty_flags;
1868
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1869
    for(i = 0; i < len; i++)
1870
        p[i] &= mask;
1871

    
1872
    /* we modify the TLB cache so that the dirty bit will be set again
1873
       when accessing the range */
1874
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1875
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1877
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1878
            != (end - 1) - start) {
1879
        abort();
1880
    }
1881

    
1882
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1883
        int mmu_idx;
1884
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1885
            for(i = 0; i < CPU_TLB_SIZE; i++)
1886
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1887
                                      start1, length);
1888
        }
1889
    }
1890
}
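/* Editor's note (not in the original source): phys_ram_dirty holds one byte
 * of flag bits per target page.  Clearing flags here, together with the
 * tlb_reset_dirty_range() pass over every CPU's TLB above, re-arms the
 * TLB_NOTDIRTY slow path so the next guest write to the range is trapped
 * and can mark the page dirty again.
 */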
1891

    
1892
int cpu_physical_memory_set_dirty_tracking(int enable)
1893
{
1894
    in_migration = enable;
1895
    if (kvm_enabled()) {
1896
        return kvm_set_migration_log(enable);
1897
    }
1898
    return 0;
1899
}
1900

    
1901
int cpu_physical_memory_get_dirty_tracking(void)
1902
{
1903
    return in_migration;
1904
}
1905

    
1906
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1907
                                   target_phys_addr_t end_addr)
1908
{
1909
    int ret = 0;
1910

    
1911
    if (kvm_enabled())
1912
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1913
    return ret;
1914
}
1915

    
1916
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1917
{
1918
    ram_addr_t ram_addr;
1919
    void *p;
1920

    
1921
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1922
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1923
            + tlb_entry->addend);
1924
        ram_addr = qemu_ram_addr_from_host(p);
1925
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1926
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1927
        }
1928
    }
1929
}
1930

    
1931
/* update the TLB according to the current state of the dirty bits */
1932
void cpu_tlb_update_dirty(CPUState *env)
1933
{
1934
    int i;
1935
    int mmu_idx;
1936
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1937
        for(i = 0; i < CPU_TLB_SIZE; i++)
1938
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1939
    }
1940
}
1941

    
1942
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1943
{
1944
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1945
        tlb_entry->addr_write = vaddr;
1946
}
1947

    
1948
/* update the TLB corresponding to virtual page vaddr
1949
   so that it is no longer dirty */
1950
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1951
{
1952
    int i;
1953
    int mmu_idx;
1954

    
1955
    vaddr &= TARGET_PAGE_MASK;
1956
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1957
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1958
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1959
}
1960

    
1961
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
1965
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1966
                      target_phys_addr_t paddr, int prot,
1967
                      int mmu_idx, int is_softmmu)
1968
{
1969
    PhysPageDesc *p;
1970
    unsigned long pd;
1971
    unsigned int index;
1972
    target_ulong address;
1973
    target_ulong code_address;
1974
    target_phys_addr_t addend;
1975
    int ret;
1976
    CPUTLBEntry *te;
1977
    CPUWatchpoint *wp;
1978
    target_phys_addr_t iotlb;
1979

    
1980
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1981
    if (!p) {
1982
        pd = IO_MEM_UNASSIGNED;
1983
    } else {
1984
        pd = p->phys_offset;
1985
    }
1986
#if defined(DEBUG_TLB)
1987
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1988
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1989
#endif
1990

    
1991
    ret = 0;
1992
    address = vaddr;
1993
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1994
        /* IO memory case (romd handled later) */
1995
        address |= TLB_MMIO;
1996
    }
1997
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1998
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1999
        /* Normal RAM.  */
2000
        iotlb = pd & TARGET_PAGE_MASK;
2001
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2002
            iotlb |= IO_MEM_NOTDIRTY;
2003
        else
2004
            iotlb |= IO_MEM_ROM;
2005
    } else {
2006
        /* IO handlers are currently passed a physical address.
2007
           It would be nice to pass an offset from the base address
2008
           of that region.  This would avoid having to special case RAM,
2009
           and avoid full address decoding in every device.
2010
           We can't use the high bits of pd for this because
2011
           IO_MEM_ROMD uses these as a ram address.  */
2012
        iotlb = (pd & ~TARGET_PAGE_MASK);
2013
        if (p) {
2014
            iotlb += p->region_offset;
2015
        } else {
2016
            iotlb += paddr;
2017
        }
2018
    }
2019

    
2020
    code_address = address;
2021
    /* Make accesses to pages with watchpoints go via the
2022
       watchpoint trap routines.  */
2023
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2024
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2025
            iotlb = io_mem_watch + paddr;
2026
            /* TODO: The memory case can be optimized by not trapping
2027
               reads of pages with a write breakpoint.  */
2028
            address |= TLB_MMIO;
2029
        }
2030
    }
2031

    
2032
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2033
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2034
    te = &env->tlb_table[mmu_idx][index];
2035
    te->addend = addend - vaddr;
2036
    if (prot & PAGE_READ) {
2037
        te->addr_read = address;
2038
    } else {
2039
        te->addr_read = -1;
2040
    }
2041

    
2042
    if (prot & PAGE_EXEC) {
2043
        te->addr_code = code_address;
2044
    } else {
2045
        te->addr_code = -1;
2046
    }
2047
    if (prot & PAGE_WRITE) {
2048
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2049
            (pd & IO_MEM_ROMD)) {
2050
            /* Write access calls the I/O callback.  */
2051
            te->addr_write = address | TLB_MMIO;
2052
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2053
                   !cpu_physical_memory_is_dirty(pd)) {
2054
            te->addr_write = address | TLB_NOTDIRTY;
2055
        } else {
2056
            te->addr_write = address;
2057
        }
2058
    } else {
2059
        te->addr_write = -1;
2060
    }
2061
    return ret;
2062
}
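/* Illustrative caller of tlb_set_page_exec() (editor's sketch; the helper
 * names and the protection value are hypothetical).  A target's MMU fault
 * handler resolves the virtual address against the guest page tables and
 * then installs the mapping, e.g.:
 *
 *     target_ulong page = address & TARGET_PAGE_MASK;
 *     prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
 *     tlb_set_page_exec(env, page, phys_page, prot, mmu_idx, is_softmmu);
 *
 * Because at most one entry per virtual page is kept, a repeated fault on
 * the same page simply overwrites the existing slot.
 */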
2063

    
2064
#else
2065

    
2066
void tlb_flush(CPUState *env, int flush_global)
2067
{
2068
}
2069

    
2070
void tlb_flush_page(CPUState *env, target_ulong addr)
2071
{
2072
}
2073

    
2074
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2075
                      target_phys_addr_t paddr, int prot,
2076
                      int mmu_idx, int is_softmmu)
2077
{
2078
    return 0;
2079
}
2080

    
2081
/*
2082
 * Walks guest process memory "regions" one by one
2083
 * and calls callback function 'fn' for each region.
2084
 */
2085
int walk_memory_regions(void *priv,
2086
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2087
{
2088
    unsigned long start, end;
2089
    PageDesc *p = NULL;
2090
    int i, j, prot, prot1;
2091
    int rc = 0;
2092

    
2093
    start = end = -1;
2094
    prot = 0;
2095

    
2096
    for (i = 0; i <= L1_SIZE; i++) {
2097
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2098
        for (j = 0; j < L2_SIZE; j++) {
2099
            prot1 = (p == NULL) ? 0 : p[j].flags;
2100
            /*
2101
             * "region" is one continuous chunk of memory
2102
             * that has same protection flags set.
2103
             */
2104
            if (prot1 != prot) {
2105
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2106
                if (start != -1) {
2107
                    rc = (*fn)(priv, start, end, prot);
2108
                    /* callback can stop iteration by returning != 0 */
2109
                    if (rc != 0)
2110
                        return (rc);
2111
                }
2112
                if (prot1 != 0)
2113
                    start = end;
2114
                else
2115
                    start = -1;
2116
                prot = prot1;
2117
            }
2118
            if (p == NULL)
2119
                break;
2120
        }
2121
    }
2122
    return (rc);
2123
}
2124

    
2125
static int dump_region(void *priv, unsigned long start,
2126
    unsigned long end, unsigned long prot)
2127
{
2128
    FILE *f = (FILE *)priv;
2129

    
2130
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2131
        start, end, end - start,
2132
        ((prot & PAGE_READ) ? 'r' : '-'),
2133
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2134
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2135

    
2136
    return (0);
2137
}
2138

    
2139
/* dump memory mappings */
2140
void page_dump(FILE *f)
2141
{
2142
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2143
            "start", "end", "size", "prot");
2144
    walk_memory_regions(f, dump_region);
2145
}
2146

    
2147
int page_get_flags(target_ulong address)
2148
{
2149
    PageDesc *p;
2150

    
2151
    p = page_find(address >> TARGET_PAGE_BITS);
2152
    if (!p)
2153
        return 0;
2154
    return p->flags;
2155
}
2156

    
2157
/* modify the flags of a page and invalidate the code if
2158
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2159
   depending on PAGE_WRITE */
2160
void page_set_flags(target_ulong start, target_ulong end, int flags)
2161
{
2162
    PageDesc *p;
2163
    target_ulong addr;
2164

    
2165
    /* mmap_lock should already be held.  */
2166
    start = start & TARGET_PAGE_MASK;
2167
    end = TARGET_PAGE_ALIGN(end);
2168
    if (flags & PAGE_WRITE)
2169
        flags |= PAGE_WRITE_ORG;
2170
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2171
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2172
        /* We may be called for host regions that are outside guest
2173
           address space.  */
2174
        if (!p)
2175
            return;
2176
        /* if the write protection is set, then we invalidate the code
2177
           inside */
2178
        if (!(p->flags & PAGE_WRITE) &&
2179
            (flags & PAGE_WRITE) &&
2180
            p->first_tb) {
2181
            tb_invalidate_phys_page(addr, 0, NULL);
2182
        }
2183
        p->flags = flags;
2184
    }
2185
}
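/* Illustrative use of page_set_flags() (editor's sketch, not part of the
 * original source).  The user-mode mmap emulation records guest protections
 * roughly like:
 *
 *     page_set_flags(start, start + len, prot | PAGE_VALID);
 *
 * Later dropping PAGE_WRITE for a page that contains translated code is what
 * protects that code against self-modification until page_unprotect() runs.
 */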
2186

    
2187
int page_check_range(target_ulong start, target_ulong len, int flags)
2188
{
2189
    PageDesc *p;
2190
    target_ulong end;
2191
    target_ulong addr;
2192

    
2193
    if (start + len < start)
2194
        /* we've wrapped around */
2195
        return -1;
2196

    
2197
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2198
    start = start & TARGET_PAGE_MASK;
2199

    
2200
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2201
        p = page_find(addr >> TARGET_PAGE_BITS);
2202
        if( !p )
2203
            return -1;
2204
        if( !(p->flags & PAGE_VALID) )
2205
            return -1;
2206

    
2207
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2208
            return -1;
2209
        if (flags & PAGE_WRITE) {
2210
            if (!(p->flags & PAGE_WRITE_ORG))
2211
                return -1;
2212
            /* unprotect the page if it was put read-only because it
2213
               contains translated code */
2214
            if (!(p->flags & PAGE_WRITE)) {
2215
                if (!page_unprotect(addr, 0, NULL))
2216
                    return -1;
2217
            }
2218
            return 0;
2219
        }
2220
    }
2221
    return 0;
2222
}
2223

    
2224
/* called from signal handler: invalidate the code and unprotect the
2225
   page. Return TRUE if the fault was successfully handled. */
2226
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2227
{
2228
    unsigned int page_index, prot, pindex;
2229
    PageDesc *p, *p1;
2230
    target_ulong host_start, host_end, addr;
2231

    
2232
    /* Technically this isn't safe inside a signal handler.  However we
2233
       know this only ever happens in a synchronous SEGV handler, so in
2234
       practice it seems to be ok.  */
2235
    mmap_lock();
2236

    
2237
    host_start = address & qemu_host_page_mask;
2238
    page_index = host_start >> TARGET_PAGE_BITS;
2239
    p1 = page_find(page_index);
2240
    if (!p1) {
2241
        mmap_unlock();
2242
        return 0;
2243
    }
2244
    host_end = host_start + qemu_host_page_size;
2245
    p = p1;
2246
    prot = 0;
2247
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2248
        prot |= p->flags;
2249
        p++;
2250
    }
2251
    /* if the page was really writable, then we change its
2252
       protection back to writable */
2253
    if (prot & PAGE_WRITE_ORG) {
2254
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2255
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2256
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2257
                     (prot & PAGE_BITS) | PAGE_WRITE);
2258
            p1[pindex].flags |= PAGE_WRITE;
2259
            /* and since the content will be modified, we must invalidate
2260
               the corresponding translated code. */
2261
            tb_invalidate_phys_page(address, pc, puc);
2262
#ifdef DEBUG_TB_CHECK
2263
            tb_invalidate_check(address);
2264
#endif
2265
            mmap_unlock();
2266
            return 1;
2267
        }
2268
    }
2269
    mmap_unlock();
2270
    return 0;
2271
}
2272

    
2273
static inline void tlb_set_dirty(CPUState *env,
2274
                                 unsigned long addr, target_ulong vaddr)
2275
{
2276
}
2277
#endif /* defined(CONFIG_USER_ONLY) */
2278

    
2279
#if !defined(CONFIG_USER_ONLY)
2280

    
2281
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2282
                             ram_addr_t memory, ram_addr_t region_offset);
2283
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2284
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2285
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2286
                      need_subpage)                                     \
2287
    do {                                                                \
2288
        if (addr > start_addr)                                          \
2289
            start_addr2 = 0;                                            \
2290
        else {                                                          \
2291
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2292
            if (start_addr2 > 0)                                        \
2293
                need_subpage = 1;                                       \
2294
        }                                                               \
2295
                                                                        \
2296
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2297
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2298
        else {                                                          \
2299
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2300
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2301
                need_subpage = 1;                                       \
2302
        }                                                               \
2303
    } while (0)
2304

    
2305
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
2313
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2314
                                         ram_addr_t size,
2315
                                         ram_addr_t phys_offset,
2316
                                         ram_addr_t region_offset)
2317
{
2318
    target_phys_addr_t addr, end_addr;
2319
    PhysPageDesc *p;
2320
    CPUState *env;
2321
    ram_addr_t orig_size = size;
2322
    void *subpage;
2323

    
2324
    if (kvm_enabled())
2325
        kvm_set_phys_mem(start_addr, size, phys_offset);
2326

    
2327
    if (phys_offset == IO_MEM_UNASSIGNED) {
2328
        region_offset = start_addr;
2329
    }
2330
    region_offset &= TARGET_PAGE_MASK;
2331
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2332
    end_addr = start_addr + (target_phys_addr_t)size;
2333
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2334
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2335
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2336
            ram_addr_t orig_memory = p->phys_offset;
2337
            target_phys_addr_t start_addr2, end_addr2;
2338
            int need_subpage = 0;
2339

    
2340
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2341
                          need_subpage);
2342
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2343
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2344
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2345
                                           &p->phys_offset, orig_memory,
2346
                                           p->region_offset);
2347
                } else {
2348
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2349
                                            >> IO_MEM_SHIFT];
2350
                }
2351
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2352
                                 region_offset);
2353
                p->region_offset = 0;
2354
            } else {
2355
                p->phys_offset = phys_offset;
2356
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2357
                    (phys_offset & IO_MEM_ROMD))
2358
                    phys_offset += TARGET_PAGE_SIZE;
2359
            }
2360
        } else {
2361
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2362
            p->phys_offset = phys_offset;
2363
            p->region_offset = region_offset;
2364
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2365
                (phys_offset & IO_MEM_ROMD)) {
2366
                phys_offset += TARGET_PAGE_SIZE;
2367
            } else {
2368
                target_phys_addr_t start_addr2, end_addr2;
2369
                int need_subpage = 0;
2370

    
2371
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2372
                              end_addr2, need_subpage);
2373

    
2374
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2375
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2376
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2377
                                           addr & TARGET_PAGE_MASK);
2378
                    subpage_register(subpage, start_addr2, end_addr2,
2379
                                     phys_offset, region_offset);
2380
                    p->region_offset = 0;
2381
                }
2382
            }
2383
        }
2384
        region_offset += TARGET_PAGE_SIZE;
2385
    }
2386

    
2387
    /* since each CPU stores ram addresses in its TLB cache, we must
2388
       reset the modified entries */
2389
    /* XXX: slow ! */
2390
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2391
        tlb_flush(env, 1);
2392
    }
2393
}
2394

    
2395
/* XXX: temporary until new memory mapping API */
2396
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2397
{
2398
    PhysPageDesc *p;
2399

    
2400
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2401
    if (!p)
2402
        return IO_MEM_UNASSIGNED;
2403
    return p->phys_offset;
2404
}
2405

    
2406
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407
{
2408
    if (kvm_enabled())
2409
        kvm_coalesce_mmio_region(addr, size);
2410
}
2411

    
2412
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2413
{
2414
    if (kvm_enabled())
2415
        kvm_uncoalesce_mmio_region(addr, size);
2416
}
2417

    
2418
void qemu_flush_coalesced_mmio_buffer(void)
2419
{
2420
    if (kvm_enabled())
2421
        kvm_flush_coalesced_mmio_buffer();
2422
}
2423

    
2424
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2425
{
2426
    RAMBlock *new_block;
2427

    
2428
    size = TARGET_PAGE_ALIGN(size);
2429
    new_block = qemu_malloc(sizeof(*new_block));
2430

    
2431
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2432
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2433
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2434
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2435
#else
2436
    new_block->host = qemu_vmalloc(size);
2437
#endif
2438
#ifdef MADV_MERGEABLE
2439
    madvise(new_block->host, size, MADV_MERGEABLE);
2440
#endif
2441
    new_block->offset = last_ram_offset;
2442
    new_block->length = size;
2443

    
2444
    new_block->next = ram_blocks;
2445
    ram_blocks = new_block;
2446

    
2447
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2448
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2449
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2450
           0xff, size >> TARGET_PAGE_BITS);
2451

    
2452
    last_ram_offset += size;
2453

    
2454
    if (kvm_enabled())
2455
        kvm_setup_guest_memory(new_block->host, size);
2456

    
2457
    return new_block->offset;
2458
}
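/* Illustrative pairing of qemu_ram_alloc() with memory registration
 * (editor's sketch; the board code and sizes are hypothetical):
 *
 *     ram_addr_t ram_offset = qemu_ram_alloc(ram_size);
 *     cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
 *
 * The return value is a ram_addr_t offset into the RAM block list, not a
 * host pointer; use qemu_get_ram_ptr() when a host address is needed.
 */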
2459

    
2460
void qemu_ram_free(ram_addr_t addr)
2461
{
2462
    /* TODO: implement this.  */
2463
}
2464

    
2465
/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
2473
void *qemu_get_ram_ptr(ram_addr_t addr)
2474
{
2475
    RAMBlock *prev;
2476
    RAMBlock **prevp;
2477
    RAMBlock *block;
2478

    
2479
    prev = NULL;
2480
    prevp = &ram_blocks;
2481
    block = ram_blocks;
2482
    while (block && (block->offset > addr
2483
                     || block->offset + block->length <= addr)) {
2484
        if (prev)
2485
          prevp = &prev->next;
2486
        prev = block;
2487
        block = block->next;
2488
    }
2489
    if (!block) {
2490
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2491
        abort();
2492
    }
2493
    /* Move this entry to the start of the list.  */
2494
    if (prev) {
2495
        prev->next = block->next;
2496
        block->next = *prevp;
2497
        *prevp = block;
2498
    }
2499
    return block->host + (addr - block->offset);
2500
}
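/* Editor's note (not in the original source): the lookup above is a linear
 * scan of the RAM block list with a move-to-front step, so repeated lookups
 * that hit the same block stay cheap even though the list is unsorted.
 */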
2501

    
2502
/* Some of the softmmu routines need to translate from a host pointer
2503
   (typically a TLB entry) back to a ram offset.  */
2504
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2505
{
2506
    RAMBlock *prev;
2507
    RAMBlock *block;
2508
    uint8_t *host = ptr;
2509

    
2510
    prev = NULL;
2511
    block = ram_blocks;
2512
    while (block && (block->host > host
2513
                     || block->host + block->length <= host)) {
2514
        prev = block;
2515
        block = block->next;
2516
    }
2517
    if (!block) {
2518
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2519
        abort();
2520
    }
2521
    return block->offset + (host - block->host);
2522
}
2523

    
2524
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2525
{
2526
#ifdef DEBUG_UNASSIGNED
2527
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2528
#endif
2529
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2530
    do_unassigned_access(addr, 0, 0, 0, 1);
2531
#endif
2532
    return 0;
2533
}
2534

    
2535
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2536
{
2537
#ifdef DEBUG_UNASSIGNED
2538
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2539
#endif
2540
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2541
    do_unassigned_access(addr, 0, 0, 0, 2);
2542
#endif
2543
    return 0;
2544
}
2545

    
2546
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2547
{
2548
#ifdef DEBUG_UNASSIGNED
2549
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2550
#endif
2551
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2552
    do_unassigned_access(addr, 0, 0, 0, 4);
2553
#endif
2554
    return 0;
2555
}
2556

    
2557
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2558
{
2559
#ifdef DEBUG_UNASSIGNED
2560
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2561
#endif
2562
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2563
    do_unassigned_access(addr, 1, 0, 0, 1);
2564
#endif
2565
}
2566

    
2567
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2568
{
2569
#ifdef DEBUG_UNASSIGNED
2570
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2571
#endif
2572
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2573
    do_unassigned_access(addr, 1, 0, 0, 2);
2574
#endif
2575
}
2576

    
2577
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2578
{
2579
#ifdef DEBUG_UNASSIGNED
2580
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2581
#endif
2582
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2583
    do_unassigned_access(addr, 1, 0, 0, 4);
2584
#endif
2585
}
2586

    
2587
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2588
    unassigned_mem_readb,
2589
    unassigned_mem_readw,
2590
    unassigned_mem_readl,
2591
};
2592

    
2593
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2594
    unassigned_mem_writeb,
2595
    unassigned_mem_writew,
2596
    unassigned_mem_writel,
2597
};
2598

    
2599
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2600
                                uint32_t val)
2601
{
2602
    int dirty_flags;
2603
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2604
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2605
#if !defined(CONFIG_USER_ONLY)
2606
        tb_invalidate_phys_page_fast(ram_addr, 1);
2607
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2608
#endif
2609
    }
2610
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2611
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2612
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2613
    /* we remove the notdirty callback only if the code has been
2614
       flushed */
2615
    if (dirty_flags == 0xff)
2616
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2617
}
2618

    
2619
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2620
                                uint32_t val)
2621
{
2622
    int dirty_flags;
2623
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2624
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2625
#if !defined(CONFIG_USER_ONLY)
2626
        tb_invalidate_phys_page_fast(ram_addr, 2);
2627
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2628
#endif
2629
    }
2630
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2631
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2632
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2633
    /* we remove the notdirty callback only if the code has been
2634
       flushed */
2635
    if (dirty_flags == 0xff)
2636
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2637
}
2638

    
2639
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2640
                                uint32_t val)
2641
{
2642
    int dirty_flags;
2643
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2644
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2645
#if !defined(CONFIG_USER_ONLY)
2646
        tb_invalidate_phys_page_fast(ram_addr, 4);
2647
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2648
#endif
2649
    }
2650
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2651
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2652
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2653
    /* we remove the notdirty callback only if the code has been
2654
       flushed */
2655
    if (dirty_flags == 0xff)
2656
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2657
}
2658

    
2659
static CPUReadMemoryFunc * const error_mem_read[3] = {
2660
    NULL, /* never used */
2661
    NULL, /* never used */
2662
    NULL, /* never used */
2663
};
2664

    
2665
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2666
    notdirty_mem_writeb,
2667
    notdirty_mem_writew,
2668
    notdirty_mem_writel,
2669
};
2670

    
2671
/* Generate a debug exception if a watchpoint has been hit.  */
2672
static void check_watchpoint(int offset, int len_mask, int flags)
2673
{
2674
    CPUState *env = cpu_single_env;
2675
    target_ulong pc, cs_base;
2676
    TranslationBlock *tb;
2677
    target_ulong vaddr;
2678
    CPUWatchpoint *wp;
2679
    int cpu_flags;
2680

    
2681
    if (env->watchpoint_hit) {
2682
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2685
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2686
        return;
2687
    }
2688
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2689
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2690
        if ((vaddr == (wp->vaddr & len_mask) ||
2691
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2692
            wp->flags |= BP_WATCHPOINT_HIT;
2693
            if (!env->watchpoint_hit) {
2694
                env->watchpoint_hit = wp;
2695
                tb = tb_find_pc(env->mem_io_pc);
2696
                if (!tb) {
2697
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2698
                              "pc=%p", (void *)env->mem_io_pc);
2699
                }
2700
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2701
                tb_phys_invalidate(tb, -1);
2702
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2703
                    env->exception_index = EXCP_DEBUG;
2704
                } else {
2705
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2706
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2707
                }
2708
                cpu_resume_from_signal(env, NULL);
2709
            }
2710
        } else {
2711
            wp->flags &= ~BP_WATCHPOINT_HIT;
2712
        }
2713
    }
2714
}
2715

    
2716
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2717
   so these check for a hit then pass through to the normal out-of-line
2718
   phys routines.  */
2719
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2720
{
2721
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2722
    return ldub_phys(addr);
2723
}
2724

    
2725
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2726
{
2727
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2728
    return lduw_phys(addr);
2729
}
2730

    
2731
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2732
{
2733
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2734
    return ldl_phys(addr);
2735
}
2736

    
2737
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2738
                             uint32_t val)
2739
{
2740
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2741
    stb_phys(addr, val);
2742
}
2743

    
2744
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2745
                             uint32_t val)
2746
{
2747
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2748
    stw_phys(addr, val);
2749
}
2750

    
2751
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2752
                             uint32_t val)
2753
{
2754
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2755
    stl_phys(addr, val);
2756
}
2757

    
2758
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2759
    watch_mem_readb,
2760
    watch_mem_readw,
2761
    watch_mem_readl,
2762
};
2763

    
2764
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2765
    watch_mem_writeb,
2766
    watch_mem_writew,
2767
    watch_mem_writel,
2768
};
2769

    
2770
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2771
                                 unsigned int len)
2772
{
2773
    uint32_t ret;
2774
    unsigned int idx;
2775

    
2776
    idx = SUBPAGE_IDX(addr);
2777
#if defined(DEBUG_SUBPAGE)
2778
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2779
           mmio, len, addr, idx);
2780
#endif
2781
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2782
                                       addr + mmio->region_offset[idx][0][len]);
2783

    
2784
    return ret;
2785
}
2786

    
2787
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2788
                              uint32_t value, unsigned int len)
2789
{
2790
    unsigned int idx;
2791

    
2792
    idx = SUBPAGE_IDX(addr);
2793
#if defined(DEBUG_SUBPAGE)
2794
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2795
           mmio, len, addr, idx, value);
2796
#endif
2797
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2798
                                  addr + mmio->region_offset[idx][1][len],
2799
                                  value);
2800
}
2801

    
2802
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2803
{
2804
#if defined(DEBUG_SUBPAGE)
2805
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2806
#endif
2807

    
2808
    return subpage_readlen(opaque, addr, 0);
2809
}
2810

    
2811
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2812
                            uint32_t value)
2813
{
2814
#if defined(DEBUG_SUBPAGE)
2815
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2816
#endif
2817
    subpage_writelen(opaque, addr, value, 0);
2818
}
2819

    
2820
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2821
{
2822
#if defined(DEBUG_SUBPAGE)
2823
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2824
#endif
2825

    
2826
    return subpage_readlen(opaque, addr, 1);
2827
}
2828

    
2829
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2830
                            uint32_t value)
2831
{
2832
#if defined(DEBUG_SUBPAGE)
2833
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2834
#endif
2835
    subpage_writelen(opaque, addr, value, 1);
2836
}
2837

    
2838
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2839
{
2840
#if defined(DEBUG_SUBPAGE)
2841
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2842
#endif
2843

    
2844
    return subpage_readlen(opaque, addr, 2);
2845
}
2846

    
2847
static void subpage_writel (void *opaque,
2848
                         target_phys_addr_t addr, uint32_t value)
2849
{
2850
#if defined(DEBUG_SUBPAGE)
2851
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2852
#endif
2853
    subpage_writelen(opaque, addr, value, 2);
2854
}
2855

    
2856
static CPUReadMemoryFunc * const subpage_read[] = {
2857
    &subpage_readb,
2858
    &subpage_readw,
2859
    &subpage_readl,
2860
};
2861

    
2862
static CPUWriteMemoryFunc * const subpage_write[] = {
2863
    &subpage_writeb,
2864
    &subpage_writew,
2865
    &subpage_writel,
2866
};
2867

    
2868
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2869
                             ram_addr_t memory, ram_addr_t region_offset)
2870
{
2871
    int idx, eidx;
2872
    unsigned int i;
2873

    
2874
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2875
        return -1;
2876
    idx = SUBPAGE_IDX(start);
2877
    eidx = SUBPAGE_IDX(end);
2878
#if defined(DEBUG_SUBPAGE)
2879
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2880
           mmio, start, end, idx, eidx, memory);
2881
#endif
2882
    memory >>= IO_MEM_SHIFT;
2883
    for (; idx <= eidx; idx++) {
2884
        for (i = 0; i < 4; i++) {
2885
            if (io_mem_read[memory][i]) {
2886
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2887
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2888
                mmio->region_offset[idx][0][i] = region_offset;
2889
            }
2890
            if (io_mem_write[memory][i]) {
2891
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2892
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2893
                mmio->region_offset[idx][1][i] = region_offset;
2894
            }
2895
        }
2896
    }
2897

    
2898
    return 0;
2899
}
2900

    
2901
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2902
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2903
{
2904
    subpage_t *mmio;
2905
    int subpage_memory;
2906

    
2907
    mmio = qemu_mallocz(sizeof(subpage_t));
2908

    
2909
    mmio->base = base;
2910
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2911
#if defined(DEBUG_SUBPAGE)
2912
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2913
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2914
#endif
2915
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2916
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2917
                         region_offset);
2918

    
2919
    return mmio;
2920
}
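/* Editor's note (not in the original source): a subpage is used when a
 * single target page is shared by more than one memory region.  The page's
 * phys_offset is replaced by an io index tagged IO_MEM_SUBPAGE, and the
 * per-offset tables filled in by subpage_register() dispatch each access to
 * the handler that actually covers that part of the page.
 */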
2921

    
2922
static int get_free_io_mem_idx(void)
2923
{
2924
    int i;
2925

    
2926
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2927
        if (!io_mem_used[i]) {
2928
            io_mem_used[i] = 1;
2929
            return i;
2930
        }
2931
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
2932
    return -1;
2933
}
2934

    
2935
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). -1 is
   returned on error. */
2942
static int cpu_register_io_memory_fixed(int io_index,
2943
                                        CPUReadMemoryFunc * const *mem_read,
2944
                                        CPUWriteMemoryFunc * const *mem_write,
2945
                                        void *opaque)
2946
{
2947
    int i, subwidth = 0;
2948

    
2949
    if (io_index <= 0) {
2950
        io_index = get_free_io_mem_idx();
2951
        if (io_index == -1)
2952
            return io_index;
2953
    } else {
2954
        io_index >>= IO_MEM_SHIFT;
2955
        if (io_index >= IO_MEM_NB_ENTRIES)
2956
            return -1;
2957
    }
2958

    
2959
    for(i = 0;i < 3; i++) {
2960
        if (!mem_read[i] || !mem_write[i])
2961
            subwidth = IO_MEM_SUBWIDTH;
2962
        io_mem_read[io_index][i] = mem_read[i];
2963
        io_mem_write[io_index][i] = mem_write[i];
2964
    }
2965
    io_mem_opaque[io_index] = opaque;
2966
    return (io_index << IO_MEM_SHIFT) | subwidth;
2967
}
2968

    
2969
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2970
                           CPUWriteMemoryFunc * const *mem_write,
2971
                           void *opaque)
2972
{
2973
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2974
}
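/* Illustrative use of cpu_register_io_memory() (editor's sketch; the device
 * callbacks and state pointer are hypothetical):
 *
 *     static CPUReadMemoryFunc * const mydev_read[3] = {
 *         mydev_readb, mydev_readw, mydev_readl,
 *     };
 *     static CPUWriteMemoryFunc * const mydev_write[3] = {
 *         mydev_writeb, mydev_writew, mydev_writel,
 *     };
 *     int io = cpu_register_io_memory(mydev_read, mydev_write, s);
 *     cpu_register_physical_memory(base, 0x1000, io);
 *
 * The returned value already carries the IO_MEM_SHIFT encoding expected by
 * cpu_register_physical_memory(), including the IO_MEM_SUBWIDTH flag when
 * some access sizes were left NULL.
 */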
2975

    
2976
void cpu_unregister_io_memory(int io_table_address)
2977
{
2978
    int i;
2979
    int io_index = io_table_address >> IO_MEM_SHIFT;
2980

    
2981
    for (i=0;i < 3; i++) {
2982
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2983
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2984
    }
2985
    io_mem_opaque[io_index] = NULL;
2986
    io_mem_used[io_index] = 0;
2987
}
2988

    
2989
static void io_mem_init(void)
2990
{
2991
    int i;
2992

    
2993
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2994
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2995
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2996
    for (i=0; i<5; i++)
2997
        io_mem_used[i] = 1;
2998

    
2999
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
3000
                                          watch_mem_write, NULL);
3001
}
3002

    
3003
#endif /* !defined(CONFIG_USER_ONLY) */
3004

    
3005
/* physical memory access (slow version, mainly for debug) */
3006
#if defined(CONFIG_USER_ONLY)
3007
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3008
                            int len, int is_write)
3009
{
3010
    int l, flags;
3011
    target_ulong page;
3012
    void * p;
3013

    
3014
    while (len > 0) {
3015
        page = addr & TARGET_PAGE_MASK;
3016
        l = (page + TARGET_PAGE_SIZE) - addr;
3017
        if (l > len)
3018
            l = len;
3019
        flags = page_get_flags(page);
3020
        if (!(flags & PAGE_VALID))
3021
            return;
3022
        if (is_write) {
3023
            if (!(flags & PAGE_WRITE))
3024
                return;
3025
            /* XXX: this code should not depend on lock_user */
3026
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3027
                /* FIXME - should this return an error rather than just fail? */
3028
                return;
3029
            memcpy(p, buf, l);
3030
            unlock_user(p, addr, l);
3031
        } else {
3032
            if (!(flags & PAGE_READ))
3033
                return;
3034
            /* XXX: this code should not depend on lock_user */
3035
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3036
                /* FIXME - should this return an error rather than just fail? */
3037
                return;
3038
            memcpy(buf, p, l);
3039
            unlock_user(p, addr, 0);
3040
        }
3041
        len -= l;
3042
        buf += l;
3043
        addr += l;
3044
    }
3045
}
3046

    
3047
#else
3048
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3049
                            int len, int is_write)
3050
{
3051
    int l, io_index;
3052
    uint8_t *ptr;
3053
    uint32_t val;
3054
    target_phys_addr_t page;
3055
    unsigned long pd;
3056
    PhysPageDesc *p;
3057

    
3058
    while (len > 0) {
3059
        page = addr & TARGET_PAGE_MASK;
3060
        l = (page + TARGET_PAGE_SIZE) - addr;
3061
        if (l > len)
3062
            l = len;
3063
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3064
        if (!p) {
3065
            pd = IO_MEM_UNASSIGNED;
3066
        } else {
3067
            pd = p->phys_offset;
3068
        }
3069

    
3070
        if (is_write) {
3071
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3072
                target_phys_addr_t addr1 = addr;
3073
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3074
                if (p)
3075
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3076
                /* XXX: could force cpu_single_env to NULL to avoid
3077
                   potential bugs */
3078
                if (l >= 4 && ((addr1 & 3) == 0)) {
3079
                    /* 32 bit write access */
3080
                    val = ldl_p(buf);
3081
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3082
                    l = 4;
3083
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3084
                    /* 16 bit write access */
3085
                    val = lduw_p(buf);
3086
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3087
                    l = 2;
3088
                } else {
3089
                    /* 8 bit write access */
3090
                    val = ldub_p(buf);
3091
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3092
                    l = 1;
3093
                }
3094
            } else {
3095
                unsigned long addr1;
3096
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3097
                /* RAM case */
3098
                ptr = qemu_get_ram_ptr(addr1);
3099
                memcpy(ptr, buf, l);
3100
                if (!cpu_physical_memory_is_dirty(addr1)) {
3101
                    /* invalidate code */
3102
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3103
                    /* set dirty bit */
3104
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3105
                        (0xff & ~CODE_DIRTY_FLAG);
3106
                }
3107
            }
3108
        } else {
3109
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3110
                !(pd & IO_MEM_ROMD)) {
3111
                target_phys_addr_t addr1 = addr;
3112
                /* I/O case */
3113
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3114
                if (p)
3115
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3116
                if (l >= 4 && ((addr1 & 3) == 0)) {
3117
                    /* 32 bit read access */
3118
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3119
                    stl_p(buf, val);
3120
                    l = 4;
3121
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3122
                    /* 16 bit read access */
3123
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3124
                    stw_p(buf, val);
3125
                    l = 2;
3126
                } else {
3127
                    /* 8 bit read access */
3128
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3129
                    stb_p(buf, val);
3130
                    l = 1;
3131
                }
3132
            } else {
3133
                /* RAM case */
3134
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3135
                    (addr & ~TARGET_PAGE_MASK);
3136
                memcpy(buf, ptr, l);
3137
            }
3138
        }
3139
        len -= l;
3140
        buf += l;
3141
        addr += l;
3142
    }
3143
}
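/* Editor's note (not in the original source): for the MMIO branches above,
 * the loop issues the widest naturally aligned access that still fits in
 * the remaining length (4, 2 or 1 bytes), so device callbacks never see
 * misaligned or oversized accesses from this path.
 */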
3144

    
3145
/* used for ROM loading : can write in RAM and ROM */
3146
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3147
                                   const uint8_t *buf, int len)
3148
{
3149
    int l;
3150
    uint8_t *ptr;
3151
    target_phys_addr_t page;
3152
    unsigned long pd;
3153
    PhysPageDesc *p;
3154

    
3155
    while (len > 0) {
3156
        page = addr & TARGET_PAGE_MASK;
3157
        l = (page + TARGET_PAGE_SIZE) - addr;
3158
        if (l > len)
3159
            l = len;
3160
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3161
        if (!p) {
3162
            pd = IO_MEM_UNASSIGNED;
3163
        } else {
3164
            pd = p->phys_offset;
3165
        }
3166

    
3167
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3168
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3169
            !(pd & IO_MEM_ROMD)) {
3170
            /* do nothing */
3171
        } else {
3172
            unsigned long addr1;
3173
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3174
            /* ROM/RAM case */
3175
            ptr = qemu_get_ram_ptr(addr1);
3176
            memcpy(ptr, buf, l);
3177
        }
3178
        len -= l;
3179
        buf += l;
3180
        addr += l;
3181
    }
3182
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
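
/* Illustrative sketch (not part of the build): a device model whose DMA
   mapping attempt fails (e.g. because the single bounce buffer is busy)
   can register a callback here and retry once cpu_notify_map_clients()
   runs.  example_retry_dma(), dev_state and the guest address are
   hypothetical. */
#if 0
static void example_retry_dma(void *opaque);

static void example_start_dma(void *dev_state)
{
    target_phys_addr_t len = TARGET_PAGE_SIZE;
    void *host = cpu_physical_memory_map(0x100000, &len, 1);
    if (!host) {
        /* ask to be called back once a mapping may succeed again */
        cpu_register_map_client(dev_state, example_retry_dma);
        return;
    }
    /* ... fill the buffer ..., then release it */
    cpu_physical_memory_unmap(host, len, 1, len);
}
#endif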

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
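
/* Illustrative sketch (not part of the build): access_len lets a caller
   that mapped more than it finally transferred mark only the bytes it
   actually wrote as dirty.  The guest address and sizes are
   hypothetical. */
#if 0
static void example_partial_dma_write(target_phys_addr_t gpa)
{
    target_phys_addr_t len = 2 * TARGET_PAGE_SIZE;
    uint8_t *host = cpu_physical_memory_map(gpa, &len, 1);
    if (host) {
        target_phys_addr_t done = len / 2;   /* suppose only half was filled */
        memset(host, 0, done);
        cpu_physical_memory_unmap(host, len, 1, done);
    }
}
#endif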

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}
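
/* Illustrative sketch (not part of the build): callers such as page
   table walkers read naturally aligned words straight from guest
   physical memory with ldl_phys()/ldq_phys(); the descriptor layout
   below is hypothetical. */
#if 0
static uint64_t example_read_descriptor(target_phys_addr_t base, int index)
{
    /* base must be 8-byte aligned for ldq_phys() */
    return ldq_phys(base + index * 8);
}
#endif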

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned.  The RAM page is not marked as dirty
   and the code inside is not invalidated.  This is useful when the dirty
   bits are used to track modified PTEs. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
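
/* Illustrative sketch (not part of the build): a target MMU helper that
   sets an accessed/dirty flag in a guest PTE can use stl_phys_notdirty()
   so that the bookkeeping write does not itself perturb dirty tracking.
   The PTE address and flag value are hypothetical. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20);   /* hypothetical accessed bit */
}
#endif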

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
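
/* Illustrative sketch (not part of the build): board or firmware setup
   code stores target-endian values into guest RAM with the st*_phys()
   helpers; unlike the *_notdirty variants they update the dirty bitmap
   and invalidate translated code on the page.  The mailbox address and
   layout are hypothetical. */
#if 0
static void example_init_mailbox(target_phys_addr_t mbox)
{
    stl_phys(mbox, 0);        /* 32-bit status word */
    stq_phys(mbox + 8, 0);    /* 64-bit message pointer */
}
#endif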

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
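
/* Illustrative sketch (not part of the build): a debugger front end such
   as the gdb stub reads guest virtual memory through
   cpu_memory_rw_debug(), which returns -1 when a page is unmapped.
   example_peek_guest() is a hypothetical wrapper. */
#if 0
static int example_peek_guest(CPUState *env, target_ulong vaddr,
                              uint8_t *out, int len)
{
    return cpu_memory_rw_debug(env, vaddr, out, len, 0);
}
#endif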

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
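
/* Illustrative sketch (not part of the build): dump_exec_info() takes a
   printf-like callback, so it can write to a plain stdio stream as well
   as to the monitor. */
#if 0
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif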

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif