/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
#include "tcg.h"
#include "hw/hw.h"
#include "osdep.h"
#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#include <signal.h>
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_SPARC)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#elif defined(TARGET_ALPHA)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#define TARGET_VIRT_ADDR_SPACE_BITS 42
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

static TranslationBlock *tbs;
int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump. ARM and Sparc64
 have limited branch ranges (possibly also PPC) so place it in a
 section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
uint8_t *phys_ram_dirty;
static int in_migration;

typedef struct RAMBlock {
    uint8_t *host;
    ram_addr_t offset;
    ram_addr_t length;
    struct RAMBlock *next;
} RAMBlock;

static RAMBlock *ram_blocks;
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
   then we can no longer assume contiguous ram offsets, and external uses
   of this variable will break.  */
ram_addr_t last_ram_offset;
#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;
/* Current instruction counter.  While executing translated code this may
   include some instructions that have not yet been executed.  */
int64_t qemu_icount;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

#define L2_BITS 10
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
/* XXX: this is a temporary hack for alpha target.
 *      In the future, this is to be replaced by a multi-level table
 *      to actually be able to handle the complete 64 bits address space.
 */
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
static PhysPageDesc **l1_phys_map;

#if !defined(CONFIG_USER_ONLY)
static void io_mem_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
    void *opaque[TARGET_PAGE_SIZE][2][4];
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
} subpage_t;

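/* mark a host memory range as executable so generated code can be run from it */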
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

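/* determine the host page size and allocate the top level physical page table */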
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));

#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
    {
        long long startaddr, endaddr;
        FILE *f;
        int n;

        mmap_lock();
        last_brk = (unsigned long)sbrk(0);
        f = fopen("/proc/self/maps", "r");
        if (f) {
            do {
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
                if (n == 2) {
                    startaddr = MIN(startaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    endaddr = MIN(endaddr,
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
                                   TARGET_PAGE_ALIGN(endaddr),
                                   PAGE_RESERVED);
                }
            } while (!feof(f));
            fclose(f);
        }
        mmap_unlock();
    }
#endif
}

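/* return the slot of the first level page table that covers the given guest
   page index, or NULL if the index is out of range */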
static inline PageDesc **page_l1_map(target_ulong index)
{
#if TARGET_LONG_BITS > 32
    /* Host memory outside guest VM.  For 32-bit targets we have already
       excluded high addresses.  */
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
        return NULL;
#endif
    return &l1_map[index >> L2_BITS];
}

static inline PageDesc *page_find_alloc(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        /* allocate if not found */
#if defined(CONFIG_USER_ONLY)
        size_t len = sizeof(PageDesc) * L2_SIZE;
        /* Don't use qemu_malloc because it may recurse.  */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        *lp = p;
        if (h2g_valid(p)) {
            unsigned long addr = h2g(p);
            page_set_flags(addr & TARGET_PAGE_MASK,
                           TARGET_PAGE_ALIGN(addr + len),
                           PAGE_RESERVED);
        }
#else
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
        *lp = p;
#endif
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(target_ulong index)
{
    PageDesc **lp, *p;
    lp = page_l1_map(index);
    if (!lp)
        return NULL;

    p = *lp;
    if (!p) {
        return NULL;
    }
    return p + (index & (L2_SIZE - 1));
}

355
{
356
    void **lp, **p;
357
    PhysPageDesc *pd;
358

    
359
    p = (void **)l1_phys_map;
360
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
361

    
362
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
363
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
364
#endif
365
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
366
    p = *lp;
367
    if (!p) {
368
        /* allocate if not found */
369
        if (!alloc)
370
            return NULL;
371
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
372
        memset(p, 0, sizeof(void *) * L1_SIZE);
373
        *lp = p;
374
    }
375
#endif
376
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
377
    pd = *lp;
378
    if (!pd) {
379
        int i;
380
        /* allocate if not found */
381
        if (!alloc)
382
            return NULL;
383
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
384
        *lp = pd;
385
        for (i = 0; i < L2_SIZE; i++) {
386
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
387
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
388
        }
389
    }
390
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
391
}
392

    
393
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
394
{
395
    return phys_page_find_alloc(index, 0);
396
}
397

    
398
#if !defined(CONFIG_USER_ONLY)
399
static void tlb_protect_code(ram_addr_t ram_addr);
400
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
401
                                    target_ulong vaddr);
402
#define mmap_lock() do { } while(0)
403
#define mmap_unlock() do { } while(0)
404
#endif
405

    
406
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
407

    
408
#if defined(CONFIG_USER_ONLY)
409
/* Currently it is not recommended to allocate big chunks of data in
410
   user mode. It will change when a dedicated libc will be used */
411
#define USE_STATIC_CODE_GEN_BUFFER
412
#endif
413

    
414
#ifdef USE_STATIC_CODE_GEN_BUFFER
415
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
416
#endif
417

    
418
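/* allocate the buffer that holds the translated code and the array of TB
   descriptors; the buffer must be executable and, on some hosts, placed
   within direct-branch range of the prologue */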
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        code_gen_max_block_size();
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static void cpu_common_pre_save(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *env = opaque;

    cpu_synchronize_state(env);
    return 0;
}

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .pre_save = cpu_common_pre_save,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

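/* register a new virtual CPU: assign it the next cpu_index, link it into the
   global CPU list and register its common state for save/restore */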
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

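/* remove a TB from the per-page list it is linked into; the low two bits of
   each list pointer encode which of the TB's pages the link belongs to */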
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

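/* unlink jump slot 'n' of a TB from the circular list of blocks that jump into it */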
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_phys_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

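/* set 'len' bits starting at bit index 'start' in the byte array 'tab' */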
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

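/* build a bitmap of which bytes of the page are covered by translated code,
   so that writes which cannot touch any TB can be filtered out quickly */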
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

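/* translate a new block for (pc, cs_base, flags), flushing the whole
   translation cache first if the code buffer is full, and link it into the
   physical page tables */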
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start;end[. NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    target_ulong tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                its execution. We could be more precise by checking
                that the modification is after the current PC, but it
                would require a specialized function to partially
                restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_io_pc, NULL);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_phys_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, target_ulong page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
            page_get_flags(addr);
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
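/* invalidate any TB that contains the breakpoint address so that it gets
   retranslated with the breakpoint taken into account */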
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif

/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = qemu_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    qemu_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = qemu_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

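/* force the CPU out of the TB it is currently executing by breaking the
   chained jumps of that TB */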
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

#ifndef CONFIG_USER_ONLY
    /*
     * If called from iothread context, wake the target cpu in
     * case it is halted.
     */
    if (!qemu_cpu_self(env)) {
        qemu_cpu_kick(env);
        return;
    }
#endif

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
#ifndef CONFIG_USER_ONLY
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
#endif
    } else {
        cpu_unlink_tb(env);
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
1708
{
1709
    CPUState *new_env = cpu_init(env->cpu_model_str);
1710
    CPUState *next_cpu = new_env->next_cpu;
1711
    int cpu_index = new_env->cpu_index;
1712
#if defined(TARGET_HAS_ICE)
1713
    CPUBreakpoint *bp;
1714
    CPUWatchpoint *wp;
1715
#endif
1716

    
1717
    memcpy(new_env, env, sizeof(CPUState));
1718

    
1719
    /* Preserve chaining and index. */
1720
    new_env->next_cpu = next_cpu;
1721
    new_env->cpu_index = cpu_index;
1722

    
1723
    /* Clone all break/watchpoints.
1724
       Note: Once we support ptrace with hw-debug register access, make sure
1725
       BP_CPU break/watchpoints are handled correctly on clone. */
1726
    QTAILQ_INIT(&env->breakpoints);
1727
    QTAILQ_INIT(&env->watchpoints);
1728
#if defined(TARGET_HAS_ICE)
1729
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1730
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1731
    }
1732
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1733
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1734
                              wp->flags, NULL);
1735
    }
1736
#endif
1737

    
1738
    return new_env;
1739
}
1740

    
1741
#if !defined(CONFIG_USER_ONLY)
1742

    
1743
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1744
{
1745
    unsigned int i;
1746

    
1747
    /* Discard jump cache entries for any tb which might potentially
1748
       overlap the flushed page.  */
1749
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1750
    memset (&env->tb_jmp_cache[i], 0, 
1751
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1752

    
1753
    i = tb_jmp_cache_hash_page(addr);
1754
    memset (&env->tb_jmp_cache[i], 0, 
1755
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1756
}
1757

    
1758
static CPUTLBEntry s_cputlb_empty_entry = {
1759
    .addr_read  = -1,
1760
    .addr_write = -1,
1761
    .addr_code  = -1,
1762
    .addend     = -1,
1763
};
1764

    
1765
/* NOTE: if flush_global is true, also flush global entries (not
1766
   implemented yet) */
1767
void tlb_flush(CPUState *env, int flush_global)
1768
{
1769
    int i;
1770

    
1771
#if defined(DEBUG_TLB)
1772
    printf("tlb_flush:\n");
1773
#endif
1774
    /* must reset current TB so that interrupts cannot modify the
1775
       links while we are modifying them */
1776
    env->current_tb = NULL;
1777

    
1778
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1779
        int mmu_idx;
1780
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1781
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1782
        }
1783
    }
1784

    
1785
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1786

    
1787
    tlb_flush_count++;
1788
}
1789

    
1790
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1791
{
1792
    if (addr == (tlb_entry->addr_read &
1793
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1794
        addr == (tlb_entry->addr_write &
1795
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1796
        addr == (tlb_entry->addr_code &
1797
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1798
        *tlb_entry = s_cputlb_empty_entry;
1799
    }
1800
}
1801

    
1802
void tlb_flush_page(CPUState *env, target_ulong addr)
1803
{
1804
    int i;
1805
    int mmu_idx;
1806

    
1807
#if defined(DEBUG_TLB)
1808
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1809
#endif
1810
    /* must reset current TB so that interrupts cannot modify the
1811
       links while we are modifying them */
1812
    env->current_tb = NULL;
1813

    
1814
    addr &= TARGET_PAGE_MASK;
1815
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1816
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1817
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1818

    
1819
    tlb_flush_jmp_cache(env, addr);
1820
}
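
/* Illustrative sketch, not part of the original file: a target MMU helper
   would invalidate a single stale mapping like this after the guest rewrites
   a page table entry.  The helper name is hypothetical. */
static void example_pte_updated(CPUState *env, target_ulong vaddr)
{
    /* Drop any cached translation for this page so the next access refaults
       and goes through tlb_set_page_exec() with the new permissions. */
    tlb_flush_page(env, vaddr);
}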
1821

    
1822
/* update the TLBs so that writes to code in the virtual page 'addr'
1823
   can be detected */
1824
static void tlb_protect_code(ram_addr_t ram_addr)
1825
{
1826
    cpu_physical_memory_reset_dirty(ram_addr,
1827
                                    ram_addr + TARGET_PAGE_SIZE,
1828
                                    CODE_DIRTY_FLAG);
1829
}
1830

    
1831
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self-modifying code */
1833
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1834
                                    target_ulong vaddr)
1835
{
1836
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1837
}
1838

    
1839
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1840
                                         unsigned long start, unsigned long length)
1841
{
1842
    unsigned long addr;
1843
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1844
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1845
        if ((addr - start) < length) {
1846
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1847
        }
1848
    }
1849
}
1850

    
1851
/* Note: start and end must be within the same ram block.  */
1852
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1853
                                     int dirty_flags)
1854
{
1855
    CPUState *env;
1856
    unsigned long length, start1;
1857
    int i, mask, len;
1858
    uint8_t *p;
1859

    
1860
    start &= TARGET_PAGE_MASK;
1861
    end = TARGET_PAGE_ALIGN(end);
1862

    
1863
    length = end - start;
1864
    if (length == 0)
1865
        return;
1866
    len = length >> TARGET_PAGE_BITS;
1867
    mask = ~dirty_flags;
1868
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1869
    for(i = 0; i < len; i++)
1870
        p[i] &= mask;
1871

    
1872
    /* we modify the TLB cache so that the dirty bit will be set again
1873
       when accessing the range */
1874
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1875
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1877
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1878
            != (end - 1) - start) {
1879
        abort();
1880
    }
1881

    
1882
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1883
        int mmu_idx;
1884
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1885
            for(i = 0; i < CPU_TLB_SIZE; i++)
1886
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1887
                                      start1, length);
1888
        }
1889
    }
1890
}
1891

    
1892
int cpu_physical_memory_set_dirty_tracking(int enable)
1893
{
1894
    in_migration = enable;
1895
    if (kvm_enabled()) {
1896
        return kvm_set_migration_log(enable);
1897
    }
1898
    return 0;
1899
}
1900

    
1901
int cpu_physical_memory_get_dirty_tracking(void)
1902
{
1903
    return in_migration;
1904
}
1905

    
1906
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
1907
                                   target_phys_addr_t end_addr)
1908
{
1909
    int ret = 0;
1910

    
1911
    if (kvm_enabled())
1912
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1913
    return ret;
1914
}
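
/* Illustrative sketch, not part of the original file: the rough calling
   pattern a RAM migration pass uses around the two helpers above.  The
   function name is hypothetical. */
static void example_migration_dirty_pass(target_phys_addr_t ram_size)
{
    cpu_physical_memory_set_dirty_tracking(1);    /* start logging guest writes */
    cpu_physical_sync_dirty_bitmap(0, ram_size);  /* fold in KVM's dirty log */
    /* ... scan the dirty bitmap and send modified pages here ... */
    cpu_physical_memory_set_dirty_tracking(0);    /* stop once migration is done */
}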
1915

    
1916
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1917
{
1918
    ram_addr_t ram_addr;
1919
    void *p;
1920

    
1921
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1922
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1923
            + tlb_entry->addend);
1924
        ram_addr = qemu_ram_addr_from_host(p);
1925
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1926
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1927
        }
1928
    }
1929
}
1930

    
1931
/* update the TLB according to the current state of the dirty bits */
1932
void cpu_tlb_update_dirty(CPUState *env)
1933
{
1934
    int i;
1935
    int mmu_idx;
1936
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1937
        for(i = 0; i < CPU_TLB_SIZE; i++)
1938
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1939
    }
1940
}
1941

    
1942
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1943
{
1944
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1945
        tlb_entry->addr_write = vaddr;
1946
}
1947

    
1948
/* update the TLB corresponding to virtual page vaddr
1949
   so that it is no longer dirty */
1950
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1951
{
1952
    int i;
1953
    int mmu_idx;
1954

    
1955
    vaddr &= TARGET_PAGE_MASK;
1956
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1957
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1958
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1959
}
1960

    
1961
/* add a new TLB entry. At most one entry for a given virtual address
1962
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1963
   (can only happen in non SOFTMMU mode for I/O pages or pages
1964
   conflicting with the host address space). */
1965
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1966
                      target_phys_addr_t paddr, int prot,
1967
                      int mmu_idx, int is_softmmu)
1968
{
1969
    PhysPageDesc *p;
1970
    unsigned long pd;
1971
    unsigned int index;
1972
    target_ulong address;
1973
    target_ulong code_address;
1974
    target_phys_addr_t addend;
1975
    int ret;
1976
    CPUTLBEntry *te;
1977
    CPUWatchpoint *wp;
1978
    target_phys_addr_t iotlb;
1979

    
1980
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1981
    if (!p) {
1982
        pd = IO_MEM_UNASSIGNED;
1983
    } else {
1984
        pd = p->phys_offset;
1985
    }
1986
#if defined(DEBUG_TLB)
1987
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1988
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1989
#endif
1990

    
1991
    ret = 0;
1992
    address = vaddr;
1993
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1994
        /* IO memory case (romd handled later) */
1995
        address |= TLB_MMIO;
1996
    }
1997
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1998
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1999
        /* Normal RAM.  */
2000
        iotlb = pd & TARGET_PAGE_MASK;
2001
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2002
            iotlb |= IO_MEM_NOTDIRTY;
2003
        else
2004
            iotlb |= IO_MEM_ROM;
2005
    } else {
2006
        /* IO handlers are currently passed a physical address.
2007
           It would be nice to pass an offset from the base address
2008
           of that region.  This would avoid having to special case RAM,
2009
           and avoid full address decoding in every device.
2010
           We can't use the high bits of pd for this because
2011
           IO_MEM_ROMD uses these as a ram address.  */
2012
        iotlb = (pd & ~TARGET_PAGE_MASK);
2013
        if (p) {
2014
            iotlb += p->region_offset;
2015
        } else {
2016
            iotlb += paddr;
2017
        }
2018
    }
2019

    
2020
    code_address = address;
2021
    /* Make accesses to pages with watchpoints go via the
2022
       watchpoint trap routines.  */
2023
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2024
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2025
            iotlb = io_mem_watch + paddr;
2026
            /* TODO: The memory case can be optimized by not trapping
2027
               reads of pages with a write breakpoint.  */
2028
            address |= TLB_MMIO;
2029
        }
2030
    }
2031

    
2032
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2033
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2034
    te = &env->tlb_table[mmu_idx][index];
2035
    te->addend = addend - vaddr;
2036
    if (prot & PAGE_READ) {
2037
        te->addr_read = address;
2038
    } else {
2039
        te->addr_read = -1;
2040
    }
2041

    
2042
    if (prot & PAGE_EXEC) {
2043
        te->addr_code = code_address;
2044
    } else {
2045
        te->addr_code = -1;
2046
    }
2047
    if (prot & PAGE_WRITE) {
2048
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2049
            (pd & IO_MEM_ROMD)) {
2050
            /* Write access calls the I/O callback.  */
2051
            te->addr_write = address | TLB_MMIO;
2052
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2053
                   !cpu_physical_memory_is_dirty(pd)) {
2054
            te->addr_write = address | TLB_NOTDIRTY;
2055
        } else {
2056
            te->addr_write = address;
2057
        }
2058
    } else {
2059
        te->addr_write = -1;
2060
    }
2061
    return ret;
2062
}
2063

    
2064
#else
2065

    
2066
void tlb_flush(CPUState *env, int flush_global)
2067
{
2068
}
2069

    
2070
void tlb_flush_page(CPUState *env, target_ulong addr)
2071
{
2072
}
2073

    
2074
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2075
                      target_phys_addr_t paddr, int prot,
2076
                      int mmu_idx, int is_softmmu)
2077
{
2078
    return 0;
2079
}
2080

    
2081
/*
2082
 * Walks guest process memory "regions" one by one
2083
 * and calls callback function 'fn' for each region.
2084
 */
2085
int walk_memory_regions(void *priv,
2086
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2087
{
2088
    unsigned long start, end;
2089
    PageDesc *p = NULL;
2090
    int i, j, prot, prot1;
2091
    int rc = 0;
2092

    
2093
    start = end = -1;
2094
    prot = 0;
2095

    
2096
    for (i = 0; i <= L1_SIZE; i++) {
2097
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2098
        for (j = 0; j < L2_SIZE; j++) {
2099
            prot1 = (p == NULL) ? 0 : p[j].flags;
2100
            /*
2101
             * "region" is one continuous chunk of memory
2102
             * that has same protection flags set.
2103
             */
2104
            if (prot1 != prot) {
2105
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2106
                if (start != -1) {
2107
                    rc = (*fn)(priv, start, end, prot);
2108
                    /* callback can stop iteration by returning != 0 */
2109
                    if (rc != 0)
2110
                        return (rc);
2111
                }
2112
                if (prot1 != 0)
2113
                    start = end;
2114
                else
2115
                    start = -1;
2116
                prot = prot1;
2117
            }
2118
            if (p == NULL)
2119
                break;
2120
        }
2121
    }
2122
    return (rc);
2123
}
2124

    
2125
static int dump_region(void *priv, unsigned long start,
2126
    unsigned long end, unsigned long prot)
2127
{
2128
    FILE *f = (FILE *)priv;
2129

    
2130
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2131
        start, end, end - start,
2132
        ((prot & PAGE_READ) ? 'r' : '-'),
2133
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2134
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2135

    
2136
    return (0);
2137
}
2138

    
2139
/* dump memory mappings */
2140
void page_dump(FILE *f)
2141
{
2142
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2143
            "start", "end", "size", "prot");
2144
    walk_memory_regions(f, dump_region);
2145
}
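
/* Illustrative sketch, not part of the original file: a custom
   walk_memory_regions() callback that totals the executable bytes mapped in
   the guest.  The callback name is hypothetical. */
static int example_count_exec(void *priv, unsigned long start,
                              unsigned long end, unsigned long prot)
{
    unsigned long *total = priv;

    if (prot & PAGE_EXEC)
        *total += end - start;
    return 0;                               /* 0 keeps the walk going */
}
/* usage:  unsigned long n = 0;  walk_memory_regions(&n, example_count_exec); */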
2146

    
2147
int page_get_flags(target_ulong address)
2148
{
2149
    PageDesc *p;
2150

    
2151
    p = page_find(address >> TARGET_PAGE_BITS);
2152
    if (!p)
2153
        return 0;
2154
    return p->flags;
2155
}
2156

    
2157
/* modify the flags of a page and invalidate the code if
2158
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2159
   depending on PAGE_WRITE */
2160
void page_set_flags(target_ulong start, target_ulong end, int flags)
2161
{
2162
    PageDesc *p;
2163
    target_ulong addr;
2164

    
2165
    /* mmap_lock should already be held.  */
2166
    start = start & TARGET_PAGE_MASK;
2167
    end = TARGET_PAGE_ALIGN(end);
2168
    if (flags & PAGE_WRITE)
2169
        flags |= PAGE_WRITE_ORG;
2170
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2171
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2172
        /* We may be called for host regions that are outside guest
2173
           address space.  */
2174
        if (!p)
2175
            return;
2176
        /* if the write protection is set, then we invalidate the code
2177
           inside */
2178
        if (!(p->flags & PAGE_WRITE) &&
2179
            (flags & PAGE_WRITE) &&
2180
            p->first_tb) {
2181
            tb_invalidate_phys_page(addr, 0, NULL);
2182
        }
2183
        p->flags = flags;
2184
    }
2185
}
2186

    
2187
int page_check_range(target_ulong start, target_ulong len, int flags)
2188
{
2189
    PageDesc *p;
2190
    target_ulong end;
2191
    target_ulong addr;
2192

    
2193
    if (start + len < start)
2194
        /* we've wrapped around */
2195
        return -1;
2196

    
2197
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2198
    start = start & TARGET_PAGE_MASK;
2199

    
2200
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2201
        p = page_find(addr >> TARGET_PAGE_BITS);
2202
        if( !p )
2203
            return -1;
2204
        if( !(p->flags & PAGE_VALID) )
2205
            return -1;
2206

    
2207
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2208
            return -1;
2209
        if (flags & PAGE_WRITE) {
2210
            if (!(p->flags & PAGE_WRITE_ORG))
2211
                return -1;
2212
            /* unprotect the page if it was put read-only because it
2213
               contains translated code */
2214
            if (!(p->flags & PAGE_WRITE)) {
2215
                if (!page_unprotect(addr, 0, NULL))
2216
                    return -1;
2217
            }
2218
            return 0;
2219
        }
2220
    }
2221
    return 0;
2222
}
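
/* Illustrative sketch, not part of the original file: how user-mode syscall
   emulation typically validates a guest buffer before copying from/to it.
   The wrapper name is hypothetical. */
static int example_guest_buffer_ok(target_ulong guest_addr, target_ulong len,
                                   int writable)
{
    int flags = writable ? (PAGE_READ | PAGE_WRITE) : PAGE_READ;

    return page_check_range(guest_addr, len, flags) == 0;
}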
2223

    
2224
/* called from signal handler: invalidate the code and unprotect the
2225
   page. Return TRUE if the fault was successfully handled. */
2226
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2227
{
2228
    unsigned int page_index, prot, pindex;
2229
    PageDesc *p, *p1;
2230
    target_ulong host_start, host_end, addr;
2231

    
2232
    /* Technically this isn't safe inside a signal handler.  However we
2233
       know this only ever happens in a synchronous SEGV handler, so in
2234
       practice it seems to be ok.  */
2235
    mmap_lock();
2236

    
2237
    host_start = address & qemu_host_page_mask;
2238
    page_index = host_start >> TARGET_PAGE_BITS;
2239
    p1 = page_find(page_index);
2240
    if (!p1) {
2241
        mmap_unlock();
2242
        return 0;
2243
    }
2244
    host_end = host_start + qemu_host_page_size;
2245
    p = p1;
2246
    prot = 0;
2247
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2248
        prot |= p->flags;
2249
        p++;
2250
    }
2251
    /* if the page was really writable, then we change its
2252
       protection back to writable */
2253
    if (prot & PAGE_WRITE_ORG) {
2254
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2255
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2256
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2257
                     (prot & PAGE_BITS) | PAGE_WRITE);
2258
            p1[pindex].flags |= PAGE_WRITE;
2259
            /* and since the content will be modified, we must invalidate
2260
               the corresponding translated code. */
2261
            tb_invalidate_phys_page(address, pc, puc);
2262
#ifdef DEBUG_TB_CHECK
2263
            tb_invalidate_check(address);
2264
#endif
2265
            mmap_unlock();
2266
            return 1;
2267
        }
2268
    }
2269
    mmap_unlock();
2270
    return 0;
2271
}
2272

    
2273
static inline void tlb_set_dirty(CPUState *env,
2274
                                 unsigned long addr, target_ulong vaddr)
2275
{
2276
}
2277
#endif /* defined(CONFIG_USER_ONLY) */
2278

    
2279
#if !defined(CONFIG_USER_ONLY)
2280

    
2281
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2282
                             ram_addr_t memory, ram_addr_t region_offset);
2283
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2284
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2285
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2286
                      need_subpage)                                     \
2287
    do {                                                                \
2288
        if (addr > start_addr)                                          \
2289
            start_addr2 = 0;                                            \
2290
        else {                                                          \
2291
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2292
            if (start_addr2 > 0)                                        \
2293
                need_subpage = 1;                                       \
2294
        }                                                               \
2295
                                                                        \
2296
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2297
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2298
        else {                                                          \
2299
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2300
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2301
                need_subpage = 1;                                       \
2302
        }                                                               \
2303
    } while (0)
2304

    
2305
/* register physical memory.
2306
   For RAM, 'size' must be a multiple of the target page size.
2307
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2308
   io memory page.  The address used when calling the IO function is
2309
   the offset from the start of the region, plus region_offset.  Both
2310
   start_addr and region_offset are rounded down to a page boundary
2311
   before calculating this offset.  This should not be a problem unless
2312
   the low bits of start_addr and region_offset differ.  */
2313
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2314
                                         ram_addr_t size,
2315
                                         ram_addr_t phys_offset,
2316
                                         ram_addr_t region_offset)
2317
{
2318
    target_phys_addr_t addr, end_addr;
2319
    PhysPageDesc *p;
2320
    CPUState *env;
2321
    ram_addr_t orig_size = size;
2322
    void *subpage;
2323

    
2324
    if (kvm_enabled())
2325
        kvm_set_phys_mem(start_addr, size, phys_offset);
2326

    
2327
    if (phys_offset == IO_MEM_UNASSIGNED) {
2328
        region_offset = start_addr;
2329
    }
2330
    region_offset &= TARGET_PAGE_MASK;
2331
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2332
    end_addr = start_addr + (target_phys_addr_t)size;
2333
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2334
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2335
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2336
            ram_addr_t orig_memory = p->phys_offset;
2337
            target_phys_addr_t start_addr2, end_addr2;
2338
            int need_subpage = 0;
2339

    
2340
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2341
                          need_subpage);
2342
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2343
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2344
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2345
                                           &p->phys_offset, orig_memory,
2346
                                           p->region_offset);
2347
                } else {
2348
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2349
                                            >> IO_MEM_SHIFT];
2350
                }
2351
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2352
                                 region_offset);
2353
                p->region_offset = 0;
2354
            } else {
2355
                p->phys_offset = phys_offset;
2356
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2357
                    (phys_offset & IO_MEM_ROMD))
2358
                    phys_offset += TARGET_PAGE_SIZE;
2359
            }
2360
        } else {
2361
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2362
            p->phys_offset = phys_offset;
2363
            p->region_offset = region_offset;
2364
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2365
                (phys_offset & IO_MEM_ROMD)) {
2366
                phys_offset += TARGET_PAGE_SIZE;
2367
            } else {
2368
                target_phys_addr_t start_addr2, end_addr2;
2369
                int need_subpage = 0;
2370

    
2371
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2372
                              end_addr2, need_subpage);
2373

    
2374
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2375
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2376
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2377
                                           addr & TARGET_PAGE_MASK);
2378
                    subpage_register(subpage, start_addr2, end_addr2,
2379
                                     phys_offset, region_offset);
2380
                    p->region_offset = 0;
2381
                }
2382
            }
2383
        }
2384
        region_offset += TARGET_PAGE_SIZE;
2385
    }
2386

    
2387
    /* since each CPU stores ram addresses in its TLB cache, we must
2388
       reset the modified entries */
2389
    /* XXX: slow ! */
2390
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2391
        tlb_flush(env, 1);
2392
    }
2393
}
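
/* Illustrative sketch, not part of the original file: how board code
   typically maps RAM and one page of device MMIO with the call above.  The
   addresses are hypothetical; "dev_io_index" would come from
   cpu_register_io_memory() further below. */
static void example_board_map(ram_addr_t ram_offset, int dev_io_index)
{
    /* 64 MB of RAM at physical address 0 (IO_MEM_RAM is the RAM "io" type) */
    cpu_register_physical_memory_offset(0x00000000, 64 * 1024 * 1024,
                                        ram_offset | IO_MEM_RAM, 0);
    /* one page of MMIO; handlers see offsets relative to the region start */
    cpu_register_physical_memory_offset(0x10000000, TARGET_PAGE_SIZE,
                                        dev_io_index, 0);
}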
2394

    
2395
/* XXX: temporary until new memory mapping API */
2396
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2397
{
2398
    PhysPageDesc *p;
2399

    
2400
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2401
    if (!p)
2402
        return IO_MEM_UNASSIGNED;
2403
    return p->phys_offset;
2404
}
2405

    
2406
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2407
{
2408
    if (kvm_enabled())
2409
        kvm_coalesce_mmio_region(addr, size);
2410
}
2411

    
2412
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2413
{
2414
    if (kvm_enabled())
2415
        kvm_uncoalesce_mmio_region(addr, size);
2416
}
2417

    
2418
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2419
{
2420
    RAMBlock *new_block;
2421

    
2422
    size = TARGET_PAGE_ALIGN(size);
2423
    new_block = qemu_malloc(sizeof(*new_block));
2424

    
2425
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2426
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
2427
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
2428
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
2429
#else
2430
    new_block->host = qemu_vmalloc(size);
2431
#endif
2432
#ifdef MADV_MERGEABLE
2433
    madvise(new_block->host, size, MADV_MERGEABLE);
2434
#endif
2435
    new_block->offset = last_ram_offset;
2436
    new_block->length = size;
2437

    
2438
    new_block->next = ram_blocks;
2439
    ram_blocks = new_block;
2440

    
2441
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2442
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2443
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2444
           0xff, size >> TARGET_PAGE_BITS);
2445

    
2446
    last_ram_offset += size;
2447

    
2448
    if (kvm_enabled())
2449
        kvm_setup_guest_memory(new_block->host, size);
2450

    
2451
    return new_block->offset;
2452
}
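
/* Illustrative sketch, not part of the original file: allocating a guest RAM
   block and touching it through its host mapping.  The size is hypothetical;
   qemu_get_ram_ptr() is defined just below. */
static ram_addr_t example_alloc_vram(void)
{
    ram_addr_t offset = qemu_ram_alloc(4 * 1024 * 1024);  /* 4 MB block */
    void *host = qemu_get_ram_ptr(offset);                /* host pointer to it */

    memset(host, 0, 4 * 1024 * 1024);                     /* e.g. clear it */
    return offset;      /* pass this to cpu_register_physical_memory_offset() */
}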
2453

    
2454
void qemu_ram_free(ram_addr_t addr)
2455
{
2456
    /* TODO: implement this.  */
2457
}
2458

    
2459
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2460
   With the exception of the softmmu code in this file, this should
2461
   only be used for local memory (e.g. video ram) that the device owns,
2462
   and knows it isn't going to access beyond the end of the block.
2463

2464
   It should not be used for general purpose DMA.
2465
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2466
 */
2467
void *qemu_get_ram_ptr(ram_addr_t addr)
2468
{
2469
    RAMBlock *prev;
2470
    RAMBlock **prevp;
2471
    RAMBlock *block;
2472

    
2473
    prev = NULL;
2474
    prevp = &ram_blocks;
2475
    block = ram_blocks;
2476
    while (block && (block->offset > addr
2477
                     || block->offset + block->length <= addr)) {
2478
        if (prev)
2479
          prevp = &prev->next;
2480
        prev = block;
2481
        block = block->next;
2482
    }
2483
    if (!block) {
2484
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2485
        abort();
2486
    }
2487
    /* Move this entry to the start of the list.  */
2488
    if (prev) {
2489
        prev->next = block->next;
2490
        block->next = *prevp;
2491
        *prevp = block;
2492
    }
2493
    return block->host + (addr - block->offset);
2494
}
2495

    
2496
/* Some of the softmmu routines need to translate from a host pointer
2497
   (typically a TLB entry) back to a ram offset.  */
2498
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2499
{
2500
    RAMBlock *prev;
2501
    RAMBlock *block;
2502
    uint8_t *host = ptr;
2503

    
2504
    prev = NULL;
2505
    block = ram_blocks;
2506
    while (block && (block->host > host
2507
                     || block->host + block->length <= host)) {
2508
        prev = block;
2509
        block = block->next;
2510
    }
2511
    if (!block) {
2512
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2513
        abort();
2514
    }
2515
    return block->offset + (host - block->host);
2516
}
2517

    
2518
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2519
{
2520
#ifdef DEBUG_UNASSIGNED
2521
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2522
#endif
2523
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2524
    do_unassigned_access(addr, 0, 0, 0, 1);
2525
#endif
2526
    return 0;
2527
}
2528

    
2529
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2530
{
2531
#ifdef DEBUG_UNASSIGNED
2532
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2533
#endif
2534
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2535
    do_unassigned_access(addr, 0, 0, 0, 2);
2536
#endif
2537
    return 0;
2538
}
2539

    
2540
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2541
{
2542
#ifdef DEBUG_UNASSIGNED
2543
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2544
#endif
2545
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2546
    do_unassigned_access(addr, 0, 0, 0, 4);
2547
#endif
2548
    return 0;
2549
}
2550

    
2551
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2552
{
2553
#ifdef DEBUG_UNASSIGNED
2554
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2555
#endif
2556
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2557
    do_unassigned_access(addr, 1, 0, 0, 1);
2558
#endif
2559
}
2560

    
2561
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2562
{
2563
#ifdef DEBUG_UNASSIGNED
2564
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2565
#endif
2566
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2567
    do_unassigned_access(addr, 1, 0, 0, 2);
2568
#endif
2569
}
2570

    
2571
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2572
{
2573
#ifdef DEBUG_UNASSIGNED
2574
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2575
#endif
2576
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2577
    do_unassigned_access(addr, 1, 0, 0, 4);
2578
#endif
2579
}
2580

    
2581
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2582
    unassigned_mem_readb,
2583
    unassigned_mem_readw,
2584
    unassigned_mem_readl,
2585
};
2586

    
2587
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2588
    unassigned_mem_writeb,
2589
    unassigned_mem_writew,
2590
    unassigned_mem_writel,
2591
};
2592

    
2593
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2594
                                uint32_t val)
2595
{
2596
    int dirty_flags;
2597
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2598
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2599
#if !defined(CONFIG_USER_ONLY)
2600
        tb_invalidate_phys_page_fast(ram_addr, 1);
2601
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2602
#endif
2603
    }
2604
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2605
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2606
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2607
    /* we remove the notdirty callback only if the code has been
2608
       flushed */
2609
    if (dirty_flags == 0xff)
2610
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2611
}
2612

    
2613
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2614
                                uint32_t val)
2615
{
2616
    int dirty_flags;
2617
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2618
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2619
#if !defined(CONFIG_USER_ONLY)
2620
        tb_invalidate_phys_page_fast(ram_addr, 2);
2621
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2622
#endif
2623
    }
2624
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2625
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2626
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2627
    /* we remove the notdirty callback only if the code has been
2628
       flushed */
2629
    if (dirty_flags == 0xff)
2630
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2631
}
2632

    
2633
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2634
                                uint32_t val)
2635
{
2636
    int dirty_flags;
2637
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2638
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2639
#if !defined(CONFIG_USER_ONLY)
2640
        tb_invalidate_phys_page_fast(ram_addr, 4);
2641
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2642
#endif
2643
    }
2644
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2645
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2646
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2647
    /* we remove the notdirty callback only if the code has been
2648
       flushed */
2649
    if (dirty_flags == 0xff)
2650
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2651
}
2652

    
2653
static CPUReadMemoryFunc * const error_mem_read[3] = {
2654
    NULL, /* never used */
2655
    NULL, /* never used */
2656
    NULL, /* never used */
2657
};
2658

    
2659
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2660
    notdirty_mem_writeb,
2661
    notdirty_mem_writew,
2662
    notdirty_mem_writel,
2663
};
2664

    
2665
/* Generate a debug exception if a watchpoint has been hit.  */
2666
static void check_watchpoint(int offset, int len_mask, int flags)
2667
{
2668
    CPUState *env = cpu_single_env;
2669
    target_ulong pc, cs_base;
2670
    TranslationBlock *tb;
2671
    target_ulong vaddr;
2672
    CPUWatchpoint *wp;
2673
    int cpu_flags;
2674

    
2675
    if (env->watchpoint_hit) {
2676
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2679
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2680
        return;
2681
    }
2682
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2683
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2684
        if ((vaddr == (wp->vaddr & len_mask) ||
2685
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2686
            wp->flags |= BP_WATCHPOINT_HIT;
2687
            if (!env->watchpoint_hit) {
2688
                env->watchpoint_hit = wp;
2689
                tb = tb_find_pc(env->mem_io_pc);
2690
                if (!tb) {
2691
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2692
                              "pc=%p", (void *)env->mem_io_pc);
2693
                }
2694
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2695
                tb_phys_invalidate(tb, -1);
2696
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2697
                    env->exception_index = EXCP_DEBUG;
2698
                } else {
2699
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2700
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2701
                }
2702
                cpu_resume_from_signal(env, NULL);
2703
            }
2704
        } else {
2705
            wp->flags &= ~BP_WATCHPOINT_HIT;
2706
        }
2707
    }
2708
}
2709

    
2710
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2711
   so these check for a hit then pass through to the normal out-of-line
2712
   phys routines.  */
2713
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2714
{
2715
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2716
    return ldub_phys(addr);
2717
}
2718

    
2719
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2720
{
2721
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2722
    return lduw_phys(addr);
2723
}
2724

    
2725
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2726
{
2727
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2728
    return ldl_phys(addr);
2729
}
2730

    
2731
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2732
                             uint32_t val)
2733
{
2734
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2735
    stb_phys(addr, val);
2736
}
2737

    
2738
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2739
                             uint32_t val)
2740
{
2741
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2742
    stw_phys(addr, val);
2743
}
2744

    
2745
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2746
                             uint32_t val)
2747
{
2748
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2749
    stl_phys(addr, val);
2750
}
2751

    
2752
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2753
    watch_mem_readb,
2754
    watch_mem_readw,
2755
    watch_mem_readl,
2756
};
2757

    
2758
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2759
    watch_mem_writeb,
2760
    watch_mem_writew,
2761
    watch_mem_writel,
2762
};
2763

    
2764
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2765
                                 unsigned int len)
2766
{
2767
    uint32_t ret;
2768
    unsigned int idx;
2769

    
2770
    idx = SUBPAGE_IDX(addr);
2771
#if defined(DEBUG_SUBPAGE)
2772
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2773
           mmio, len, addr, idx);
2774
#endif
2775
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2776
                                       addr + mmio->region_offset[idx][0][len]);
2777

    
2778
    return ret;
2779
}
2780

    
2781
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2782
                              uint32_t value, unsigned int len)
2783
{
2784
    unsigned int idx;
2785

    
2786
    idx = SUBPAGE_IDX(addr);
2787
#if defined(DEBUG_SUBPAGE)
2788
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2789
           mmio, len, addr, idx, value);
2790
#endif
2791
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2792
                                  addr + mmio->region_offset[idx][1][len],
2793
                                  value);
2794
}
2795

    
2796
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2797
{
2798
#if defined(DEBUG_SUBPAGE)
2799
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2800
#endif
2801

    
2802
    return subpage_readlen(opaque, addr, 0);
2803
}
2804

    
2805
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2806
                            uint32_t value)
2807
{
2808
#if defined(DEBUG_SUBPAGE)
2809
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2810
#endif
2811
    subpage_writelen(opaque, addr, value, 0);
2812
}
2813

    
2814
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2815
{
2816
#if defined(DEBUG_SUBPAGE)
2817
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2818
#endif
2819

    
2820
    return subpage_readlen(opaque, addr, 1);
2821
}
2822

    
2823
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2824
                            uint32_t value)
2825
{
2826
#if defined(DEBUG_SUBPAGE)
2827
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2828
#endif
2829
    subpage_writelen(opaque, addr, value, 1);
2830
}
2831

    
2832
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2833
{
2834
#if defined(DEBUG_SUBPAGE)
2835
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2836
#endif
2837

    
2838
    return subpage_readlen(opaque, addr, 2);
2839
}
2840

    
2841
static void subpage_writel (void *opaque,
2842
                         target_phys_addr_t addr, uint32_t value)
2843
{
2844
#if defined(DEBUG_SUBPAGE)
2845
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2846
#endif
2847
    subpage_writelen(opaque, addr, value, 2);
2848
}
2849

    
2850
static CPUReadMemoryFunc * const subpage_read[] = {
2851
    &subpage_readb,
2852
    &subpage_readw,
2853
    &subpage_readl,
2854
};
2855

    
2856
static CPUWriteMemoryFunc * const subpage_write[] = {
2857
    &subpage_writeb,
2858
    &subpage_writew,
2859
    &subpage_writel,
2860
};
2861

    
2862
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2863
                             ram_addr_t memory, ram_addr_t region_offset)
2864
{
2865
    int idx, eidx;
2866
    unsigned int i;
2867

    
2868
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2869
        return -1;
2870
    idx = SUBPAGE_IDX(start);
2871
    eidx = SUBPAGE_IDX(end);
2872
#if defined(DEBUG_SUBPAGE)
2873
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2874
           mmio, start, end, idx, eidx, memory);
2875
#endif
2876
    memory >>= IO_MEM_SHIFT;
2877
    for (; idx <= eidx; idx++) {
2878
        for (i = 0; i < 4; i++) {
2879
            if (io_mem_read[memory][i]) {
2880
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2881
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2882
                mmio->region_offset[idx][0][i] = region_offset;
2883
            }
2884
            if (io_mem_write[memory][i]) {
2885
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2886
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2887
                mmio->region_offset[idx][1][i] = region_offset;
2888
            }
2889
        }
2890
    }
2891

    
2892
    return 0;
2893
}
2894

    
2895
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2896
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2897
{
2898
    subpage_t *mmio;
2899
    int subpage_memory;
2900

    
2901
    mmio = qemu_mallocz(sizeof(subpage_t));
2902

    
2903
    mmio->base = base;
2904
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2905
#if defined(DEBUG_SUBPAGE)
2906
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2907
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2908
#endif
2909
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2910
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2911
                         region_offset);
2912

    
2913
    return mmio;
2914
}
2915

    
2916
static int get_free_io_mem_idx(void)
2917
{
2918
    int i;
2919

    
2920
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2921
        if (!io_mem_used[i]) {
2922
            io_mem_used[i] = 1;
2923
            return i;
2924
        }
2925
    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
2926
    return -1;
2927
}
2928

    
2929
/* mem_read and mem_write are arrays of functions containing the
2930
   function to access byte (index 0), word (index 1) and dword (index
2931
   2). Functions can be omitted with a NULL function pointer.
2932
   If io_index is non zero, the corresponding io zone is
2933
   modified. If it is zero, a new io zone is allocated. The return
2934
   value can be used with cpu_register_physical_memory(). (-1) is
2935
   returned if error. */
2936
static int cpu_register_io_memory_fixed(int io_index,
2937
                                        CPUReadMemoryFunc * const *mem_read,
2938
                                        CPUWriteMemoryFunc * const *mem_write,
2939
                                        void *opaque)
2940
{
2941
    int i, subwidth = 0;
2942

    
2943
    if (io_index <= 0) {
2944
        io_index = get_free_io_mem_idx();
2945
        if (io_index == -1)
2946
            return io_index;
2947
    } else {
2948
        io_index >>= IO_MEM_SHIFT;
2949
        if (io_index >= IO_MEM_NB_ENTRIES)
2950
            return -1;
2951
    }
2952

    
2953
    for(i = 0;i < 3; i++) {
2954
        if (!mem_read[i] || !mem_write[i])
2955
            subwidth = IO_MEM_SUBWIDTH;
2956
        io_mem_read[io_index][i] = mem_read[i];
2957
        io_mem_write[io_index][i] = mem_write[i];
2958
    }
2959
    io_mem_opaque[io_index] = opaque;
2960
    return (io_index << IO_MEM_SHIFT) | subwidth;
2961
}
2962

    
2963
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2964
                           CPUWriteMemoryFunc * const *mem_write,
2965
                           void *opaque)
2966
{
2967
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2968
}
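
/* Illustrative sketch, not part of the original file: a minimal device
   registering 32-bit-only MMIO handlers (the byte/word slots are left NULL,
   which marks the region IO_MEM_SUBWIDTH).  All names here are hypothetical. */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0xdeadbeef;                 /* addr is the offset within the region */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* latch val into device state here */
}

static CPUReadMemoryFunc * const example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};
static CPUWriteMemoryFunc * const example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

/* usage:
 *   int io = cpu_register_io_memory(example_dev_read, example_dev_write, dev);
 *   cpu_register_physical_memory(base, TARGET_PAGE_SIZE, io);
 */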
2969

    
2970
void cpu_unregister_io_memory(int io_table_address)
2971
{
2972
    int i;
2973
    int io_index = io_table_address >> IO_MEM_SHIFT;
2974

    
2975
    for (i=0;i < 3; i++) {
2976
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2977
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2978
    }
2979
    io_mem_opaque[io_index] = NULL;
2980
    io_mem_used[io_index] = 0;
2981
}
2982

    
2983
static void io_mem_init(void)
2984
{
2985
    int i;
2986

    
2987
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2988
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2989
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2990
    for (i=0; i<5; i++)
2991
        io_mem_used[i] = 1;
2992

    
2993
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
2994
                                          watch_mem_write, NULL);
2995
}
2996

    
2997
#endif /* !defined(CONFIG_USER_ONLY) */
2998

    
2999
/* physical memory access (slow version, mainly for debug) */
3000
#if defined(CONFIG_USER_ONLY)
3001
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3002
                            int len, int is_write)
3003
{
3004
    int l, flags;
3005
    target_ulong page;
3006
    void * p;
3007

    
3008
    while (len > 0) {
3009
        page = addr & TARGET_PAGE_MASK;
3010
        l = (page + TARGET_PAGE_SIZE) - addr;
3011
        if (l > len)
3012
            l = len;
3013
        flags = page_get_flags(page);
3014
        if (!(flags & PAGE_VALID))
3015
            return;
3016
        if (is_write) {
3017
            if (!(flags & PAGE_WRITE))
3018
                return;
3019
            /* XXX: this code should not depend on lock_user */
3020
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3021
                /* FIXME - should this return an error rather than just fail? */
3022
                return;
3023
            memcpy(p, buf, l);
3024
            unlock_user(p, addr, l);
3025
        } else {
3026
            if (!(flags & PAGE_READ))
3027
                return;
3028
            /* XXX: this code should not depend on lock_user */
3029
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3030
                /* FIXME - should this return an error rather than just fail? */
3031
                return;
3032
            memcpy(buf, p, l);
3033
            unlock_user(p, addr, 0);
3034
        }
3035
        len -= l;
3036
        buf += l;
3037
        addr += l;
3038
    }
3039
}
3040

    
3041
#else
3042
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3043
                            int len, int is_write)
3044
{
3045
    int l, io_index;
3046
    uint8_t *ptr;
3047
    uint32_t val;
3048
    target_phys_addr_t page;
3049
    unsigned long pd;
3050
    PhysPageDesc *p;
3051

    
3052
    while (len > 0) {
3053
        page = addr & TARGET_PAGE_MASK;
3054
        l = (page + TARGET_PAGE_SIZE) - addr;
3055
        if (l > len)
3056
            l = len;
3057
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3058
        if (!p) {
3059
            pd = IO_MEM_UNASSIGNED;
3060
        } else {
3061
            pd = p->phys_offset;
3062
        }
3063

    
3064
        if (is_write) {
3065
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3066
                target_phys_addr_t addr1 = addr;
3067
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3068
                if (p)
3069
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3070
                /* XXX: could force cpu_single_env to NULL to avoid
3071
                   potential bugs */
3072
                if (l >= 4 && ((addr1 & 3) == 0)) {
3073
                    /* 32 bit write access */
3074
                    val = ldl_p(buf);
3075
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3076
                    l = 4;
3077
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3078
                    /* 16 bit write access */
3079
                    val = lduw_p(buf);
3080
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3081
                    l = 2;
3082
                } else {
3083
                    /* 8 bit write access */
3084
                    val = ldub_p(buf);
3085
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3086
                    l = 1;
3087
                }
3088
            } else {
3089
                unsigned long addr1;
3090
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3091
                /* RAM case */
3092
                ptr = qemu_get_ram_ptr(addr1);
3093
                memcpy(ptr, buf, l);
3094
                if (!cpu_physical_memory_is_dirty(addr1)) {
3095
                    /* invalidate code */
3096
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3097
                    /* set dirty bit */
3098
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3099
                        (0xff & ~CODE_DIRTY_FLAG);
3100
                }
3101
            }
3102
        } else {
3103
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3104
                !(pd & IO_MEM_ROMD)) {
3105
                target_phys_addr_t addr1 = addr;
3106
                /* I/O case */
3107
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3108
                if (p)
3109
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3110
                if (l >= 4 && ((addr1 & 3) == 0)) {
3111
                    /* 32 bit read access */
3112
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3113
                    stl_p(buf, val);
3114
                    l = 4;
3115
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3116
                    /* 16 bit read access */
3117
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3118
                    stw_p(buf, val);
3119
                    l = 2;
3120
                } else {
3121
                    /* 8 bit read access */
3122
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3123
                    stb_p(buf, val);
3124
                    l = 1;
3125
                }
3126
            } else {
3127
                /* RAM case */
3128
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3129
                    (addr & ~TARGET_PAGE_MASK);
3130
                memcpy(buf, ptr, l);
3131
            }
3132
        }
3133
        len -= l;
3134
        buf += l;
3135
        addr += l;
3136
    }
3137
}
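
/* Illustrative sketch, not part of the original file: reading a 32-bit value
   from guest physical memory through the slow path above.  The wrapper name
   is hypothetical (the ld*_phys helpers provide the same thing faster). */
static uint32_t example_read_phys_u32(target_phys_addr_t addr)
{
    uint8_t buf[4];

    cpu_physical_memory_rw(addr, buf, 4, 0);   /* is_write = 0: read */
    return ldl_p(buf);                         /* host load from the buffer */
}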
3138

    
3139
/* used for ROM loading : can write in RAM and ROM */
3140
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
3141
                                   const uint8_t *buf, int len)
3142
{
3143
    int l;
3144
    uint8_t *ptr;
3145
    target_phys_addr_t page;
3146
    unsigned long pd;
3147
    PhysPageDesc *p;
3148

    
3149
    while (len > 0) {
3150
        page = addr & TARGET_PAGE_MASK;
3151
        l = (page + TARGET_PAGE_SIZE) - addr;
3152
        if (l > len)
3153
            l = len;
3154
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3155
        if (!p) {
3156
            pd = IO_MEM_UNASSIGNED;
3157
        } else {
3158
            pd = p->phys_offset;
3159
        }
3160

    
3161
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3162
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3163
            !(pd & IO_MEM_ROMD)) {
3164
            /* do nothing */
3165
        } else {
3166
            unsigned long addr1;
3167
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3168
            /* ROM/RAM case */
3169
            ptr = qemu_get_ram_ptr(addr1);
3170
            memcpy(ptr, buf, l);
3171
        }
3172
        len -= l;
3173
        buf += l;
3174
        addr += l;
3175
    }
3176
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
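
#if 0
/* Illustrative sketch (not part of the original file): the usual zero-copy
   DMA pattern built on cpu_physical_memory_map()/cpu_physical_memory_unmap().
   example_dma_read(), example_dma_retry() and the use of read() are made up
   for the example; a real device would restart its own transfer state from
   the callback. */
static void example_dma_retry(void *opaque)
{
    /* Called from cpu_notify_map_clients() once the bounce buffer is free;
       retry the mapping here. */
}

static void example_dma_read(target_phys_addr_t guest_addr,
                             target_phys_addr_t size, int fd)
{
    target_phys_addr_t plen = size;
    void *host = cpu_physical_memory_map(guest_addr, &plen, 1 /* is_write */);

    if (!host) {
        /* Mapping resources (the single bounce buffer) are exhausted:
           ask to be notified when a retry is likely to succeed. */
        cpu_register_map_client(NULL, example_dma_retry);
        return;
    }

    /* Only plen bytes are mapped; it may be less than 'size'. */
    read(fd, host, plen);

    /* Pass the number of bytes actually written so that only those pages
       are dirtied and have their TBs invalidated. */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}
#endif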

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
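
#if 0
/* Illustrative sketch (not part of the original file): why the _notdirty
   variant exists.  A target MMU helper that sets an "accessed" bit in a
   guest page-table entry wants that update to bypass dirty tracking, so
   the dirty bits remain usable for detecting PTEs modified by the guest
   itself.  PTE_ACCESSED and pte_addr are hypothetical. */
#define PTE_ACCESSED 0x20

static void example_mark_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & PTE_ACCESSED)) {
        /* A plain stl_phys() would set the dirty bit and invalidate TBs;
           stl_phys_notdirty() deliberately skips both. */
        stl_phys_notdirty(pte_addr, pte | PTE_ACCESSED);
    }
}
#endif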

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
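
#if 0
/* Illustrative sketch (not part of the original file): the ld*_phys/st*_phys
   helpers above require naturally aligned addresses and work on both RAM and
   MMIO pages.  The register base and offset used here are hypothetical. */
static uint32_t example_set_device_bit(target_phys_addr_t mmio_base)
{
    target_phys_addr_t reg = mmio_base + 0x10;  /* 4-byte aligned register */
    uint32_t v = ldl_phys(reg);                 /* 32-bit physical read */

    stl_phys(reg, v | 1);                       /* write back with bit 0 set */
    return v;
}
#endif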

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
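
#if 0
/* Illustrative sketch (not part of the original file): how a debugger stub
   might read guest-virtual memory with cpu_memory_rw_debug(), which performs
   its own page-table walk via cpu_get_phys_page_debug() and, on writes, can
   also patch ROM.  'env' is assumed to be the CPU being debugged. */
static int example_debug_read(CPUState *env, target_ulong vaddr,
                              uint8_t *buf, int len)
{
    /* Returns -1 if any page in the range is unmapped. */
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}
#endif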

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
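
#if 0
/* Illustrative sketch (not part of the original file): dump_exec_info()
   accepts any fprintf-compatible callback, so the translation statistics can
   be sent to a plain stdio stream as well as to the monitor. */
static void example_dump_tb_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif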

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS
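
/* Descriptive note (added, not in the original file): each inclusion of
   softmmu_template.h below instantiates the softmmu slow-path helper for one
   access size (1 << SHIFT bytes); with SOFTMMU_CODE_ACCESS and the _cmmu
   suffix defined above, these are the variants used for instruction
   fetches. */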

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif