exec.c @ f8a83245

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36
#include "qemu-common.h"
37
#include "tcg.h"
38
#include "hw/hw.h"
39
#include "osdep.h"
40
#include "kvm.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#endif
44

    
45
//#define DEBUG_TB_INVALIDATE
46
//#define DEBUG_FLUSH
47
//#define DEBUG_TLB
48
//#define DEBUG_UNASSIGNED
49

    
50
/* make various TB consistency checks */
51
//#define DEBUG_TB_CHECK
52
//#define DEBUG_TLB_CHECK
53

    
54
//#define DEBUG_IOPORT
55
//#define DEBUG_SUBPAGE
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* TB consistency checks only implemented for usermode emulation.  */
59
#undef DEBUG_TB_CHECK
60
#endif
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#if defined(TARGET_SPARC64)
65
#define TARGET_PHYS_ADDR_SPACE_BITS 41
66
#elif defined(TARGET_SPARC)
67
#define TARGET_PHYS_ADDR_SPACE_BITS 36
68
#elif defined(TARGET_ALPHA)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 42
70
#define TARGET_VIRT_ADDR_SPACE_BITS 42
71
#elif defined(TARGET_PPC64)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#elif defined(TARGET_X86_64)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_I386)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 36
77
#else
78
#define TARGET_PHYS_ADDR_SPACE_BITS 32
79
#endif
80

    
81
static TranslationBlock *tbs;
82
int code_gen_max_blocks;
83
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84
static int nb_tbs;
85
/* any access to the tbs or the page table must use this lock */
86
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
87

    
88
#if defined(__arm__) || defined(__sparc_v9__)
89
/* The prologue must be reachable with a direct jump. ARM and Sparc64
90
 have limited branch ranges (possibly also PPC) so place it in a
91
 section close to the code segment. */
92
#define code_gen_section                                \
93
    __attribute__((__section__(".gen_code")))           \
94
    __attribute__((aligned (32)))
95
#elif defined(_WIN32)
96
/* Maximum alignment for Win32 is 16. */
97
#define code_gen_section                                \
98
    __attribute__((aligned (16)))
99
#else
100
#define code_gen_section                                \
101
    __attribute__((aligned (32)))
102
#endif
103

    
104
uint8_t code_gen_prologue[1024] code_gen_section;
105
static uint8_t *code_gen_buffer;
106
static unsigned long code_gen_buffer_size;
107
/* threshold to flush the translated code buffer */
108
static unsigned long code_gen_buffer_max_size;
109
uint8_t *code_gen_ptr;
110

    
111
#if !defined(CONFIG_USER_ONLY)
112
int phys_ram_fd;
113
uint8_t *phys_ram_dirty;
114
static int in_migration;
115

    
116
typedef struct RAMBlock {
117
    uint8_t *host;
118
    ram_addr_t offset;
119
    ram_addr_t length;
120
    struct RAMBlock *next;
121
} RAMBlock;
122

    
123
static RAMBlock *ram_blocks;
124
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125
   then we can no longer assume contiguous ram offsets, and external uses
126
   of this variable will break.  */
127
ram_addr_t last_ram_offset;
128
#endif
129

    
130
CPUState *first_cpu;
131
/* current CPU in the current thread. It is only valid inside
132
   cpu_exec() */
133
CPUState *cpu_single_env;
134
/* 0 = Do not count executed instructions.
135
   1 = Precise instruction counting.
136
   2 = Adaptive rate instruction counting.  */
137
int use_icount = 0;
138
/* Current instruction counter.  While executing translated code this may
139
   include some instructions that have not yet been executed.  */
140
int64_t qemu_icount;
141

    
142
typedef struct PageDesc {
143
    /* list of TBs intersecting this ram page */
144
    TranslationBlock *first_tb;
145
    /* in order to optimize self modifying code, we count the number
146
       of lookups we do to a given page to use a bitmap */
147
    unsigned int code_write_count;
148
    uint8_t *code_bitmap;
149
#if defined(CONFIG_USER_ONLY)
150
    unsigned long flags;
151
#endif
152
} PageDesc;
153

    
154
typedef struct PhysPageDesc {
155
    /* offset in host memory of the page + io_index in the low bits */
156
    ram_addr_t phys_offset;
157
    ram_addr_t region_offset;
158
} PhysPageDesc;
159

    
160
#define L2_BITS 10
161
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162
/* XXX: this is a temporary hack for alpha target.
163
 *      In the future, this is to be replaced by a multi-level table
164
 *      to actually be able to handle the complete 64-bit address space.
165
 */
166
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167
#else
168
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169
#endif
170

    
171
#define L1_SIZE (1 << L1_BITS)
172
#define L2_SIZE (1 << L2_BITS)
173

    
174
unsigned long qemu_real_host_page_size;
175
unsigned long qemu_host_page_bits;
176
unsigned long qemu_host_page_size;
177
unsigned long qemu_host_page_mask;
178

    
179
/* XXX: for system emulation, it could just be an array */
180
static PageDesc *l1_map[L1_SIZE];
181
static PhysPageDesc **l1_phys_map;
182

    
183
#if !defined(CONFIG_USER_ONLY)
184
static void io_mem_init(void);
185

    
186
/* io memory support */
187
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
188
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190
static char io_mem_used[IO_MEM_NB_ENTRIES];
191
static int io_mem_watch;
192
#endif
193

    
194
/* log support */
195
#ifdef WIN32
196
static const char *logfilename = "qemu.log";
197
#else
198
static const char *logfilename = "/tmp/qemu.log";
199
#endif
200
FILE *logfile;
201
int loglevel;
202
static int log_append = 0;
203

    
204
/* statistics */
205
static int tlb_flush_count;
206
static int tb_flush_count;
207
static int tb_phys_invalidate_count;
208

    
209
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
210
typedef struct subpage_t {
211
    target_phys_addr_t base;
212
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
213
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
214
    void *opaque[TARGET_PAGE_SIZE][2][4];
215
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
216
} subpage_t;
217

    
218
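/* Make the host memory region [addr, addr + size) executable: VirtualProtect with
   PAGE_EXECUTE_READWRITE on Win32, mprotect(PROT_READ | PROT_WRITE | PROT_EXEC) on
   page-aligned bounds elsewhere. */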
#ifdef _WIN32
219
static void map_exec(void *addr, long size)
220
{
221
    DWORD old_protect;
222
    VirtualProtect(addr, size,
223
                   PAGE_EXECUTE_READWRITE, &old_protect);
224
    
225
}
226
#else
227
static void map_exec(void *addr, long size)
228
{
229
    unsigned long start, end, page_size;
230
    
231
    page_size = getpagesize();
232
    start = (unsigned long)addr;
233
    start &= ~(page_size - 1);
234
    
235
    end = (unsigned long)addr + size;
236
    end += page_size - 1;
237
    end &= ~(page_size - 1);
238
    
239
    mprotect((void *)start, end - start,
240
             PROT_READ | PROT_WRITE | PROT_EXEC);
241
}
242
#endif
243

    
244
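/* Determine the host and target page sizes, allocate the physical page map and,
   for user-mode emulation, mark the pages already mapped by the host process
   (from /proc/self/maps) as reserved. */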
static void page_init(void)
245
{
246
    /* NOTE: we can always suppose that qemu_host_page_size >=
247
       TARGET_PAGE_SIZE */
248
#ifdef _WIN32
249
    {
250
        SYSTEM_INFO system_info;
251

    
252
        GetSystemInfo(&system_info);
253
        qemu_real_host_page_size = system_info.dwPageSize;
254
    }
255
#else
256
    qemu_real_host_page_size = getpagesize();
257
#endif
258
    if (qemu_host_page_size == 0)
259
        qemu_host_page_size = qemu_real_host_page_size;
260
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
261
        qemu_host_page_size = TARGET_PAGE_SIZE;
262
    qemu_host_page_bits = 0;
263
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
264
        qemu_host_page_bits++;
265
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
266
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
267
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
268

    
269
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
270
    {
271
        long long startaddr, endaddr;
272
        FILE *f;
273
        int n;
274

    
275
        mmap_lock();
276
        last_brk = (unsigned long)sbrk(0);
277
        f = fopen("/proc/self/maps", "r");
278
        if (f) {
279
            do {
280
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
281
                if (n == 2) {
282
                    startaddr = MIN(startaddr,
283
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
284
                    endaddr = MIN(endaddr,
285
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
286
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
287
                                   TARGET_PAGE_ALIGN(endaddr),
288
                                   PAGE_RESERVED); 
289
                }
290
            } while (!feof(f));
291
            fclose(f);
292
        }
293
        mmap_unlock();
294
    }
295
#endif
296
}
297

    
298
static inline PageDesc **page_l1_map(target_ulong index)
299
{
300
#if TARGET_LONG_BITS > 32
301
    /* Host memory outside guest VM.  For 32-bit targets we have already
302
       excluded high addresses.  */
303
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
304
        return NULL;
305
#endif
306
    return &l1_map[index >> L2_BITS];
307
}
308

    
309
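/* Return the PageDesc for a target page index, allocating the second-level
   table on demand. */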
static inline PageDesc *page_find_alloc(target_ulong index)
310
{
311
    PageDesc **lp, *p;
312
    lp = page_l1_map(index);
313
    if (!lp)
314
        return NULL;
315

    
316
    p = *lp;
317
    if (!p) {
318
        /* allocate if not found */
319
#if defined(CONFIG_USER_ONLY)
320
        size_t len = sizeof(PageDesc) * L2_SIZE;
321
        /* Don't use qemu_malloc because it may recurse.  */
322
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
323
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
324
        *lp = p;
325
        if (h2g_valid(p)) {
326
            unsigned long addr = h2g(p);
327
            page_set_flags(addr & TARGET_PAGE_MASK,
328
                           TARGET_PAGE_ALIGN(addr + len),
329
                           PAGE_RESERVED); 
330
        }
331
#else
332
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
333
        *lp = p;
334
#endif
335
    }
336
    return p + (index & (L2_SIZE - 1));
337
}
338

    
339
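/* Like page_find_alloc(), but never allocates: returns NULL if the page has no
   descriptor yet. */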
static inline PageDesc *page_find(target_ulong index)
340
{
341
    PageDesc **lp, *p;
342
    lp = page_l1_map(index);
343
    if (!lp)
344
        return NULL;
345

    
346
    p = *lp;
347
    if (!p) {
348
        return NULL;
349
    }
350
    return p + (index & (L2_SIZE - 1));
351
}
352

    
353
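/* Look up the PhysPageDesc for a physical page index, optionally allocating the
   intermediate table levels; newly created entries start out as IO_MEM_UNASSIGNED. */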
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
354
{
355
    void **lp, **p;
356
    PhysPageDesc *pd;
357

    
358
    p = (void **)l1_phys_map;
359
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
360

    
361
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
362
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
363
#endif
364
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
365
    p = *lp;
366
    if (!p) {
367
        /* allocate if not found */
368
        if (!alloc)
369
            return NULL;
370
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
371
        memset(p, 0, sizeof(void *) * L1_SIZE);
372
        *lp = p;
373
    }
374
#endif
375
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
376
    pd = *lp;
377
    if (!pd) {
378
        int i;
379
        /* allocate if not found */
380
        if (!alloc)
381
            return NULL;
382
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
383
        *lp = pd;
384
        for (i = 0; i < L2_SIZE; i++) {
385
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
386
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
387
        }
388
    }
389
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
390
}
391

    
392
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
393
{
394
    return phys_page_find_alloc(index, 0);
395
}
396

    
397
#if !defined(CONFIG_USER_ONLY)
398
static void tlb_protect_code(ram_addr_t ram_addr);
399
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
400
                                    target_ulong vaddr);
401
#define mmap_lock() do { } while(0)
402
#define mmap_unlock() do { } while(0)
403
#endif
404

    
405
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
406

    
407
#if defined(CONFIG_USER_ONLY)
408
/* Currently it is not recommended to allocate big chunks of data in
409
   user mode. It will change when a dedicated libc will be used */
410
#define USE_STATIC_CODE_GEN_BUFFER
411
#endif
412

    
413
#ifdef USE_STATIC_CODE_GEN_BUFFER
414
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
415
#endif
416

    
417
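/* Allocate the buffer that holds generated host code (either the static buffer or
   an mmap()ed region with host-specific placement constraints), make it executable
   and size the TranslationBlock array accordingly. */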
static void code_gen_alloc(unsigned long tb_size)
418
{
419
#ifdef USE_STATIC_CODE_GEN_BUFFER
420
    code_gen_buffer = static_code_gen_buffer;
421
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422
    map_exec(code_gen_buffer, code_gen_buffer_size);
423
#else
424
    code_gen_buffer_size = tb_size;
425
    if (code_gen_buffer_size == 0) {
426
#if defined(CONFIG_USER_ONLY)
427
        /* in user mode, phys_ram_size is not meaningful */
428
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
429
#else
430
        /* XXX: needs adjustments */
431
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
432
#endif
433
    }
434
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
435
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
436
    /* The code gen buffer location may have constraints depending on
437
       the host cpu and OS */
438
#if defined(__linux__) 
439
    {
440
        int flags;
441
        void *start = NULL;
442

    
443
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
444
#if defined(__x86_64__)
445
        flags |= MAP_32BIT;
446
        /* Cannot map more than that */
447
        if (code_gen_buffer_size > (800 * 1024 * 1024))
448
            code_gen_buffer_size = (800 * 1024 * 1024);
449
#elif defined(__sparc_v9__)
450
        // Map the buffer below 2G, so we can use direct calls and branches
451
        flags |= MAP_FIXED;
452
        start = (void *) 0x60000000UL;
453
        if (code_gen_buffer_size > (512 * 1024 * 1024))
454
            code_gen_buffer_size = (512 * 1024 * 1024);
455
#elif defined(__arm__)
456
        /* Map the buffer below 32M, so we can use direct calls and branches */
457
        flags |= MAP_FIXED;
458
        start = (void *) 0x01000000UL;
459
        if (code_gen_buffer_size > 16 * 1024 * 1024)
460
            code_gen_buffer_size = 16 * 1024 * 1024;
461
#endif
462
        code_gen_buffer = mmap(start, code_gen_buffer_size,
463
                               PROT_WRITE | PROT_READ | PROT_EXEC,
464
                               flags, -1, 0);
465
        if (code_gen_buffer == MAP_FAILED) {
466
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
467
            exit(1);
468
        }
469
    }
470
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
471
    {
472
        int flags;
473
        void *addr = NULL;
474
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
475
#if defined(__x86_64__)
476
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
477
         * 0x40000000 is free */
478
        flags |= MAP_FIXED;
479
        addr = (void *)0x40000000;
480
        /* Cannot map more than that */
481
        if (code_gen_buffer_size > (800 * 1024 * 1024))
482
            code_gen_buffer_size = (800 * 1024 * 1024);
483
#endif
484
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
485
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
486
                               flags, -1, 0);
487
        if (code_gen_buffer == MAP_FAILED) {
488
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
489
            exit(1);
490
        }
491
    }
492
#else
493
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
494
    map_exec(code_gen_buffer, code_gen_buffer_size);
495
#endif
496
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
497
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
498
    code_gen_buffer_max_size = code_gen_buffer_size - 
499
        code_gen_max_block_size();
500
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
501
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
502
}
503

    
504
/* Must be called before using the QEMU cpus. 'tb_size' is the size
505
   (in bytes) allocated to the translation buffer. Zero means default
506
   size. */
507
void cpu_exec_init_all(unsigned long tb_size)
508
{
509
    cpu_gen_init();
510
    code_gen_alloc(tb_size);
511
    code_gen_ptr = code_gen_buffer;
512
    page_init();
513
#if !defined(CONFIG_USER_ONLY)
514
    io_mem_init();
515
#endif
516
}
517

    
518
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
519

    
520
static void cpu_common_pre_save(void *opaque)
521
{
522
    CPUState *env = opaque;
523

    
524
    cpu_synchronize_state(env);
525
}
526

    
527
static int cpu_common_pre_load(void *opaque)
528
{
529
    CPUState *env = opaque;
530

    
531
    cpu_synchronize_state(env);
532
    return 0;
533
}
534

    
535
static int cpu_common_post_load(void *opaque, int version_id)
536
{
537
    CPUState *env = opaque;
538

    
539
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
540
       version_id is increased. */
541
    env->interrupt_request &= ~0x01;
542
    tlb_flush(env, 1);
543

    
544
    return 0;
545
}
546

    
547
static const VMStateDescription vmstate_cpu_common = {
548
    .name = "cpu_common",
549
    .version_id = 1,
550
    .minimum_version_id = 1,
551
    .minimum_version_id_old = 1,
552
    .pre_save = cpu_common_pre_save,
553
    .pre_load = cpu_common_pre_load,
554
    .post_load = cpu_common_post_load,
555
    .fields      = (VMStateField []) {
556
        VMSTATE_UINT32(halted, CPUState),
557
        VMSTATE_UINT32(interrupt_request, CPUState),
558
        VMSTATE_END_OF_LIST()
559
    }
560
};
561
#endif
562

    
563
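/* Return the CPUState whose cpu_index matches 'cpu', or NULL if there is none. */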
CPUState *qemu_get_cpu(int cpu)
564
{
565
    CPUState *env = first_cpu;
566

    
567
    while (env) {
568
        if (env->cpu_index == cpu)
569
            break;
570
        env = env->next_cpu;
571
    }
572

    
573
    return env;
574
}
575

    
576
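/* Register a newly created CPU: assign it the next free cpu_index, append it to
   the global CPU list and, for system emulation, register its savevm state. */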
void cpu_exec_init(CPUState *env)
577
{
578
    CPUState **penv;
579
    int cpu_index;
580

    
581
#if defined(CONFIG_USER_ONLY)
582
    cpu_list_lock();
583
#endif
584
    env->next_cpu = NULL;
585
    penv = &first_cpu;
586
    cpu_index = 0;
587
    while (*penv != NULL) {
588
        penv = &(*penv)->next_cpu;
589
        cpu_index++;
590
    }
591
    env->cpu_index = cpu_index;
592
    env->numa_node = 0;
593
    QTAILQ_INIT(&env->breakpoints);
594
    QTAILQ_INIT(&env->watchpoints);
595
    *penv = env;
596
#if defined(CONFIG_USER_ONLY)
597
    cpu_list_unlock();
598
#endif
599
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
600
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
601
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
602
                    cpu_save, cpu_load, env);
603
#endif
604
}
605

    
606
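/* Free the self-modifying-code bitmap of a page and reset its write counter. */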
static inline void invalidate_page_bitmap(PageDesc *p)
607
{
608
    if (p->code_bitmap) {
609
        qemu_free(p->code_bitmap);
610
        p->code_bitmap = NULL;
611
    }
612
    p->code_write_count = 0;
613
}
614

    
615
/* set to NULL all the 'first_tb' fields in all PageDescs */
616
static void page_flush_tb(void)
617
{
618
    int i, j;
619
    PageDesc *p;
620

    
621
    for(i = 0; i < L1_SIZE; i++) {
622
        p = l1_map[i];
623
        if (p) {
624
            for(j = 0; j < L2_SIZE; j++) {
625
                p->first_tb = NULL;
626
                invalidate_page_bitmap(p);
627
                p++;
628
            }
629
        }
630
    }
631
}
632

    
633
/* flush all the translation blocks */
634
/* XXX: tb_flush is currently not thread safe */
635
void tb_flush(CPUState *env1)
636
{
637
    CPUState *env;
638
#if defined(DEBUG_FLUSH)
639
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
640
           (unsigned long)(code_gen_ptr - code_gen_buffer),
641
           nb_tbs, nb_tbs > 0 ?
642
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
643
#endif
644
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
645
        cpu_abort(env1, "Internal error: code buffer overflow\n");
646

    
647
    nb_tbs = 0;
648

    
649
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
650
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
651
    }
652

    
653
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
654
    page_flush_tb();
655

    
656
    code_gen_ptr = code_gen_buffer;
657
    /* XXX: flush processor icache at this point if cache flush is
658
       expensive */
659
    tb_flush_count++;
660
}
661

    
662
#ifdef DEBUG_TB_CHECK
663

    
664
static void tb_invalidate_check(target_ulong address)
665
{
666
    TranslationBlock *tb;
667
    int i;
668
    address &= TARGET_PAGE_MASK;
669
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
670
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
671
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
672
                  address >= tb->pc + tb->size)) {
673
                printf("ERROR invalidate: address=" TARGET_FMT_lx
674
                       " PC=%08lx size=%04x\n",
675
                       address, (long)tb->pc, tb->size);
676
            }
677
        }
678
    }
679
}
680

    
681
/* verify that all the pages have correct rights for code */
682
static void tb_page_check(void)
683
{
684
    TranslationBlock *tb;
685
    int i, flags1, flags2;
686

    
687
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
688
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
689
            flags1 = page_get_flags(tb->pc);
690
            flags2 = page_get_flags(tb->pc + tb->size - 1);
691
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
692
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
693
                       (long)tb->pc, tb->size, flags1, flags2);
694
            }
695
        }
696
    }
697
}
698

    
699
#endif
700

    
701
/* invalidate one TB */
702
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
703
                             int next_offset)
704
{
705
    TranslationBlock *tb1;
706
    for(;;) {
707
        tb1 = *ptb;
708
        if (tb1 == tb) {
709
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
710
            break;
711
        }
712
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
713
    }
714
}
715

    
716
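/* Unlink 'tb' from a page's TB list; the low two bits of each list pointer encode
   which of the TB's pages the link belongs to. */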
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
717
{
718
    TranslationBlock *tb1;
719
    unsigned int n1;
720

    
721
    for(;;) {
722
        tb1 = *ptb;
723
        n1 = (long)tb1 & 3;
724
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
725
        if (tb1 == tb) {
726
            *ptb = tb1->page_next[n1];
727
            break;
728
        }
729
        ptb = &tb1->page_next[n1];
730
    }
731
}
732

    
733
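/* Remove jump slot 'n' of 'tb' from the circular list of TBs that jump to the
   same destination TB. */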
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
734
{
735
    TranslationBlock *tb1, **ptb;
736
    unsigned int n1;
737

    
738
    ptb = &tb->jmp_next[n];
739
    tb1 = *ptb;
740
    if (tb1) {
741
        /* find tb(n) in circular list */
742
        for(;;) {
743
            tb1 = *ptb;
744
            n1 = (long)tb1 & 3;
745
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
746
            if (n1 == n && tb1 == tb)
747
                break;
748
            if (n1 == 2) {
749
                ptb = &tb1->jmp_first;
750
            } else {
751
                ptb = &tb1->jmp_next[n1];
752
            }
753
        }
754
        /* now we can suppress tb(n) from the list */
755
        *ptb = tb->jmp_next[n];
756

    
757
        tb->jmp_next[n] = NULL;
758
    }
759
}
760

    
761
/* reset the jump entry 'n' of a TB so that it is not chained to
762
   another TB */
763
static inline void tb_reset_jump(TranslationBlock *tb, int n)
764
{
765
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
766
}
767

    
768
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
769
{
770
    CPUState *env;
771
    PageDesc *p;
772
    unsigned int h, n1;
773
    target_phys_addr_t phys_pc;
774
    TranslationBlock *tb1, *tb2;
775

    
776
    /* remove the TB from the hash list */
777
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
778
    h = tb_phys_hash_func(phys_pc);
779
    tb_remove(&tb_phys_hash[h], tb,
780
              offsetof(TranslationBlock, phys_hash_next));
781

    
782
    /* remove the TB from the page list */
783
    if (tb->page_addr[0] != page_addr) {
784
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
785
        tb_page_remove(&p->first_tb, tb);
786
        invalidate_page_bitmap(p);
787
    }
788
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
789
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
790
        tb_page_remove(&p->first_tb, tb);
791
        invalidate_page_bitmap(p);
792
    }
793

    
794
    tb_invalidated_flag = 1;
795

    
796
    /* remove the TB from the hash list */
797
    h = tb_jmp_cache_hash_func(tb->pc);
798
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
799
        if (env->tb_jmp_cache[h] == tb)
800
            env->tb_jmp_cache[h] = NULL;
801
    }
802

    
803
    /* suppress this TB from the two jump lists */
804
    tb_jmp_remove(tb, 0);
805
    tb_jmp_remove(tb, 1);
806

    
807
    /* suppress any remaining jumps to this TB */
808
    tb1 = tb->jmp_first;
809
    for(;;) {
810
        n1 = (long)tb1 & 3;
811
        if (n1 == 2)
812
            break;
813
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
814
        tb2 = tb1->jmp_next[n1];
815
        tb_reset_jump(tb1, n1);
816
        tb1->jmp_next[n1] = NULL;
817
        tb1 = tb2;
818
    }
819
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
820

    
821
    tb_phys_invalidate_count++;
822
}
823

    
824
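/* Set 'len' consecutive bits starting at bit index 'start' in the bitmap 'tab'. */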
static inline void set_bits(uint8_t *tab, int start, int len)
825
{
826
    int end, mask, end1;
827

    
828
    end = start + len;
829
    tab += start >> 3;
830
    mask = 0xff << (start & 7);
831
    if ((start & ~7) == (end & ~7)) {
832
        if (start < end) {
833
            mask &= ~(0xff << (end & 7));
834
            *tab |= mask;
835
        }
836
    } else {
837
        *tab++ |= mask;
838
        start = (start + 8) & ~7;
839
        end1 = end & ~7;
840
        while (start < end1) {
841
            *tab++ = 0xff;
842
            start += 8;
843
        }
844
        if (start < end) {
845
            mask = ~(0xff << (end & 7));
846
            *tab |= mask;
847
        }
848
    }
849
}
850

    
851
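/* Build the code bitmap of a page by marking the byte ranges covered by every TB
   that intersects it. */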
static void build_page_bitmap(PageDesc *p)
852
{
853
    int n, tb_start, tb_end;
854
    TranslationBlock *tb;
855

    
856
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
857

    
858
    tb = p->first_tb;
859
    while (tb != NULL) {
860
        n = (long)tb & 3;
861
        tb = (TranslationBlock *)((long)tb & ~3);
862
        /* NOTE: this is subtle as a TB may span two physical pages */
863
        if (n == 0) {
864
            /* NOTE: tb_end may be after the end of the page, but
865
               it is not a problem */
866
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
867
            tb_end = tb_start + tb->size;
868
            if (tb_end > TARGET_PAGE_SIZE)
869
                tb_end = TARGET_PAGE_SIZE;
870
        } else {
871
            tb_start = 0;
872
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
873
        }
874
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
875
        tb = tb->page_next[n];
876
    }
877
}
878

    
879
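/* Translate the guest code at 'pc' into a new TB and link it into the physical
   hash and page tables; if the code buffer is full, flush everything and retry. */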
TranslationBlock *tb_gen_code(CPUState *env,
880
                              target_ulong pc, target_ulong cs_base,
881
                              int flags, int cflags)
882
{
883
    TranslationBlock *tb;
884
    uint8_t *tc_ptr;
885
    target_ulong phys_pc, phys_page2, virt_page2;
886
    int code_gen_size;
887

    
888
    phys_pc = get_phys_addr_code(env, pc);
889
    tb = tb_alloc(pc);
890
    if (!tb) {
891
        /* flush must be done */
892
        tb_flush(env);
893
        /* cannot fail at this point */
894
        tb = tb_alloc(pc);
895
        /* Don't forget to invalidate previous TB info.  */
896
        tb_invalidated_flag = 1;
897
    }
898
    tc_ptr = code_gen_ptr;
899
    tb->tc_ptr = tc_ptr;
900
    tb->cs_base = cs_base;
901
    tb->flags = flags;
902
    tb->cflags = cflags;
903
    cpu_gen_code(env, tb, &code_gen_size);
904
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
905

    
906
    /* check next page if needed */
907
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
908
    phys_page2 = -1;
909
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
910
        phys_page2 = get_phys_addr_code(env, virt_page2);
911
    }
912
    tb_link_phys(tb, phys_pc, phys_page2);
913
    return tb;
914
}
915

    
916
/* invalidate all TBs which intersect with the target physical page
917
   starting in range [start;end[. NOTE: start and end must refer to
918
   the same physical page. 'is_cpu_write_access' should be true if called
919
   from a real cpu write access: the virtual CPU will exit the current
920
   TB if code is modified inside this TB. */
921
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
922
                                   int is_cpu_write_access)
923
{
924
    TranslationBlock *tb, *tb_next, *saved_tb;
925
    CPUState *env = cpu_single_env;
926
    target_ulong tb_start, tb_end;
927
    PageDesc *p;
928
    int n;
929
#ifdef TARGET_HAS_PRECISE_SMC
930
    int current_tb_not_found = is_cpu_write_access;
931
    TranslationBlock *current_tb = NULL;
932
    int current_tb_modified = 0;
933
    target_ulong current_pc = 0;
934
    target_ulong current_cs_base = 0;
935
    int current_flags = 0;
936
#endif /* TARGET_HAS_PRECISE_SMC */
937

    
938
    p = page_find(start >> TARGET_PAGE_BITS);
939
    if (!p)
940
        return;
941
    if (!p->code_bitmap &&
942
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
943
        is_cpu_write_access) {
944
        /* build code bitmap */
945
        build_page_bitmap(p);
946
    }
947

    
948
    /* we remove all the TBs in the range [start, end[ */
949
    /* XXX: see if in some cases it could be faster to invalidate all the code */
950
    tb = p->first_tb;
951
    while (tb != NULL) {
952
        n = (long)tb & 3;
953
        tb = (TranslationBlock *)((long)tb & ~3);
954
        tb_next = tb->page_next[n];
955
        /* NOTE: this is subtle as a TB may span two physical pages */
956
        if (n == 0) {
957
            /* NOTE: tb_end may be after the end of the page, but
958
               it is not a problem */
959
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
960
            tb_end = tb_start + tb->size;
961
        } else {
962
            tb_start = tb->page_addr[1];
963
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
964
        }
965
        if (!(tb_end <= start || tb_start >= end)) {
966
#ifdef TARGET_HAS_PRECISE_SMC
967
            if (current_tb_not_found) {
968
                current_tb_not_found = 0;
969
                current_tb = NULL;
970
                if (env->mem_io_pc) {
971
                    /* now we have a real cpu fault */
972
                    current_tb = tb_find_pc(env->mem_io_pc);
973
                }
974
            }
975
            if (current_tb == tb &&
976
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
977
                /* If we are modifying the current TB, we must stop
978
                its execution. We could be more precise by checking
979
                that the modification is after the current PC, but it
980
                would require a specialized function to partially
981
                restore the CPU state */
982

    
983
                current_tb_modified = 1;
984
                cpu_restore_state(current_tb, env,
985
                                  env->mem_io_pc, NULL);
986
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
987
                                     &current_flags);
988
            }
989
#endif /* TARGET_HAS_PRECISE_SMC */
990
            /* we need to do that to handle the case where a signal
991
               occurs while doing tb_phys_invalidate() */
992
            saved_tb = NULL;
993
            if (env) {
994
                saved_tb = env->current_tb;
995
                env->current_tb = NULL;
996
            }
997
            tb_phys_invalidate(tb, -1);
998
            if (env) {
999
                env->current_tb = saved_tb;
1000
                if (env->interrupt_request && env->current_tb)
1001
                    cpu_interrupt(env, env->interrupt_request);
1002
            }
1003
        }
1004
        tb = tb_next;
1005
    }
1006
#if !defined(CONFIG_USER_ONLY)
1007
    /* if no code remaining, no need to continue to use slow writes */
1008
    if (!p->first_tb) {
1009
        invalidate_page_bitmap(p);
1010
        if (is_cpu_write_access) {
1011
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1012
        }
1013
    }
1014
#endif
1015
#ifdef TARGET_HAS_PRECISE_SMC
1016
    if (current_tb_modified) {
1017
        /* we generate a block containing just the instruction
1018
           modifying the memory. It will ensure that it cannot modify
1019
           itself */
1020
        env->current_tb = NULL;
1021
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1022
        cpu_resume_from_signal(env, NULL);
1023
    }
1024
#endif
1025
}
1026

    
1027
/* len must be <= 8 and start must be a multiple of len */
1028
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1029
{
1030
    PageDesc *p;
1031
    int offset, b;
1032
#if 0
1033
    if (1) {
1034
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1035
                  cpu_single_env->mem_io_vaddr, len,
1036
                  cpu_single_env->eip,
1037
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1038
    }
1039
#endif
1040
    p = page_find(start >> TARGET_PAGE_BITS);
1041
    if (!p)
1042
        return;
1043
    if (p->code_bitmap) {
1044
        offset = start & ~TARGET_PAGE_MASK;
1045
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1046
        if (b & ((1 << len) - 1))
1047
            goto do_invalidate;
1048
    } else {
1049
    do_invalidate:
1050
        tb_invalidate_phys_page_range(start, start + len, 1);
1051
    }
1052
}
1053

    
1054
#if !defined(CONFIG_SOFTMMU)
1055
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1056
                                    unsigned long pc, void *puc)
1057
{
1058
    TranslationBlock *tb;
1059
    PageDesc *p;
1060
    int n;
1061
#ifdef TARGET_HAS_PRECISE_SMC
1062
    TranslationBlock *current_tb = NULL;
1063
    CPUState *env = cpu_single_env;
1064
    int current_tb_modified = 0;
1065
    target_ulong current_pc = 0;
1066
    target_ulong current_cs_base = 0;
1067
    int current_flags = 0;
1068
#endif
1069

    
1070
    addr &= TARGET_PAGE_MASK;
1071
    p = page_find(addr >> TARGET_PAGE_BITS);
1072
    if (!p)
1073
        return;
1074
    tb = p->first_tb;
1075
#ifdef TARGET_HAS_PRECISE_SMC
1076
    if (tb && pc != 0) {
1077
        current_tb = tb_find_pc(pc);
1078
    }
1079
#endif
1080
    while (tb != NULL) {
1081
        n = (long)tb & 3;
1082
        tb = (TranslationBlock *)((long)tb & ~3);
1083
#ifdef TARGET_HAS_PRECISE_SMC
1084
        if (current_tb == tb &&
1085
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1086
                /* If we are modifying the current TB, we must stop
1087
                   its execution. We could be more precise by checking
1088
                   that the modification is after the current PC, but it
1089
                   would require a specialized function to partially
1090
                   restore the CPU state */
1091

    
1092
            current_tb_modified = 1;
1093
            cpu_restore_state(current_tb, env, pc, puc);
1094
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1095
                                 &current_flags);
1096
        }
1097
#endif /* TARGET_HAS_PRECISE_SMC */
1098
        tb_phys_invalidate(tb, addr);
1099
        tb = tb->page_next[n];
1100
    }
1101
    p->first_tb = NULL;
1102
#ifdef TARGET_HAS_PRECISE_SMC
1103
    if (current_tb_modified) {
1104
        /* we generate a block containing just the instruction
1105
           modifying the memory. It will ensure that it cannot modify
1106
           itself */
1107
        env->current_tb = NULL;
1108
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1109
        cpu_resume_from_signal(env, puc);
1110
    }
1111
#endif
1112
}
1113
#endif
1114

    
1115
/* add the tb in the target page and protect it if necessary */
1116
static inline void tb_alloc_page(TranslationBlock *tb,
1117
                                 unsigned int n, target_ulong page_addr)
1118
{
1119
    PageDesc *p;
1120
    TranslationBlock *last_first_tb;
1121

    
1122
    tb->page_addr[n] = page_addr;
1123
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1124
    tb->page_next[n] = p->first_tb;
1125
    last_first_tb = p->first_tb;
1126
    p->first_tb = (TranslationBlock *)((long)tb | n);
1127
    invalidate_page_bitmap(p);
1128

    
1129
#if defined(TARGET_HAS_SMC) || 1
1130

    
1131
#if defined(CONFIG_USER_ONLY)
1132
    if (p->flags & PAGE_WRITE) {
1133
        target_ulong addr;
1134
        PageDesc *p2;
1135
        int prot;
1136

    
1137
        /* force the host page as non writable (writes will have a
1138
           page fault + mprotect overhead) */
1139
        page_addr &= qemu_host_page_mask;
1140
        prot = 0;
1141
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1142
            addr += TARGET_PAGE_SIZE) {
1143

    
1144
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1145
            if (!p2)
1146
                continue;
1147
            prot |= p2->flags;
1148
            p2->flags &= ~PAGE_WRITE;
1149
            page_get_flags(addr);
1150
        }
1151
        mprotect(g2h(page_addr), qemu_host_page_size,
1152
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1153
#ifdef DEBUG_TB_INVALIDATE
1154
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1155
               page_addr);
1156
#endif
1157
    }
1158
#else
1159
    /* if some code is already present, then the pages are already
1160
       protected. So we handle the case where only the first TB is
1161
       allocated in a physical page */
1162
    if (!last_first_tb) {
1163
        tlb_protect_code(page_addr);
1164
    }
1165
#endif
1166

    
1167
#endif /* TARGET_HAS_SMC */
1168
}
1169

    
1170
/* Allocate a new translation block. Flush the translation buffer if
1171
   too many translation blocks or too much generated code. */
1172
TranslationBlock *tb_alloc(target_ulong pc)
1173
{
1174
    TranslationBlock *tb;
1175

    
1176
    if (nb_tbs >= code_gen_max_blocks ||
1177
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1178
        return NULL;
1179
    tb = &tbs[nb_tbs++];
1180
    tb->pc = pc;
1181
    tb->cflags = 0;
1182
    return tb;
1183
}
1184

    
1185
void tb_free(TranslationBlock *tb)
1186
{
1187
    /* In practice this is mostly used for single use temporary TBs.
1188
       Ignore the hard cases and just back up if this TB happens to
1189
       be the last one generated.  */
1190
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1191
        code_gen_ptr = tb->tc_ptr;
1192
        nb_tbs--;
1193
    }
1194
}
1195

    
1196
/* add a new TB and link it to the physical page tables. phys_page2 is
1197
   (-1) to indicate that only one page contains the TB. */
1198
void tb_link_phys(TranslationBlock *tb,
1199
                  target_ulong phys_pc, target_ulong phys_page2)
1200
{
1201
    unsigned int h;
1202
    TranslationBlock **ptb;
1203

    
1204
    /* Grab the mmap lock to stop another thread invalidating this TB
1205
       before we are done.  */
1206
    mmap_lock();
1207
    /* add in the physical hash table */
1208
    h = tb_phys_hash_func(phys_pc);
1209
    ptb = &tb_phys_hash[h];
1210
    tb->phys_hash_next = *ptb;
1211
    *ptb = tb;
1212

    
1213
    /* add in the page list */
1214
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1215
    if (phys_page2 != -1)
1216
        tb_alloc_page(tb, 1, phys_page2);
1217
    else
1218
        tb->page_addr[1] = -1;
1219

    
1220
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1221
    tb->jmp_next[0] = NULL;
1222
    tb->jmp_next[1] = NULL;
1223

    
1224
    /* init original jump addresses */
1225
    if (tb->tb_next_offset[0] != 0xffff)
1226
        tb_reset_jump(tb, 0);
1227
    if (tb->tb_next_offset[1] != 0xffff)
1228
        tb_reset_jump(tb, 1);
1229

    
1230
#ifdef DEBUG_TB_CHECK
1231
    tb_page_check();
1232
#endif
1233
    mmap_unlock();
1234
}
1235

    
1236
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1237
   tb[1].tc_ptr. Return NULL if not found */
1238
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1239
{
1240
    int m_min, m_max, m;
1241
    unsigned long v;
1242
    TranslationBlock *tb;
1243

    
1244
    if (nb_tbs <= 0)
1245
        return NULL;
1246
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1247
        tc_ptr >= (unsigned long)code_gen_ptr)
1248
        return NULL;
1249
    /* binary search (cf Knuth) */
1250
    m_min = 0;
1251
    m_max = nb_tbs - 1;
1252
    while (m_min <= m_max) {
1253
        m = (m_min + m_max) >> 1;
1254
        tb = &tbs[m];
1255
        v = (unsigned long)tb->tc_ptr;
1256
        if (v == tc_ptr)
1257
            return tb;
1258
        else if (tc_ptr < v) {
1259
            m_max = m - 1;
1260
        } else {
1261
            m_min = m + 1;
1262
        }
1263
    }
1264
    return &tbs[m_max];
1265
}
1266

    
1267
static void tb_reset_jump_recursive(TranslationBlock *tb);
1268

    
1269
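/* If jump slot 'n' of 'tb' is chained, unchain it from the destination TB's jump
   list, reset the generated jump and recursively unchain the destination. */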
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1270
{
1271
    TranslationBlock *tb1, *tb_next, **ptb;
1272
    unsigned int n1;
1273

    
1274
    tb1 = tb->jmp_next[n];
1275
    if (tb1 != NULL) {
1276
        /* find head of list */
1277
        for(;;) {
1278
            n1 = (long)tb1 & 3;
1279
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1280
            if (n1 == 2)
1281
                break;
1282
            tb1 = tb1->jmp_next[n1];
1283
        }
1284
        /* we are now sure that tb jumps to tb1 */
1285
        tb_next = tb1;
1286

    
1287
        /* remove tb from the jmp_first list */
1288
        ptb = &tb_next->jmp_first;
1289
        for(;;) {
1290
            tb1 = *ptb;
1291
            n1 = (long)tb1 & 3;
1292
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1293
            if (n1 == n && tb1 == tb)
1294
                break;
1295
            ptb = &tb1->jmp_next[n1];
1296
        }
1297
        *ptb = tb->jmp_next[n];
1298
        tb->jmp_next[n] = NULL;
1299

    
1300
        /* suppress the jump to next tb in generated code */
1301
        tb_reset_jump(tb, n);
1302

    
1303
        /* suppress jumps in the tb on which we could have jumped */
1304
        tb_reset_jump_recursive(tb_next);
1305
    }
1306
}
1307

    
1308
static void tb_reset_jump_recursive(TranslationBlock *tb)
1309
{
1310
    tb_reset_jump_recursive2(tb, 0);
1311
    tb_reset_jump_recursive2(tb, 1);
1312
}
1313

    
1314
#if defined(TARGET_HAS_ICE)
1315
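/* Invalidate any TB covering the guest physical address that backs 'pc' so that
   the breakpoint is seen when the code is retranslated. */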
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1316
{
1317
    target_phys_addr_t addr;
1318
    target_ulong pd;
1319
    ram_addr_t ram_addr;
1320
    PhysPageDesc *p;
1321

    
1322
    addr = cpu_get_phys_page_debug(env, pc);
1323
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1324
    if (!p) {
1325
        pd = IO_MEM_UNASSIGNED;
1326
    } else {
1327
        pd = p->phys_offset;
1328
    }
1329
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1330
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1331
}
1332
#endif
1333

    
1334
/* Add a watchpoint.  */
1335
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1336
                          int flags, CPUWatchpoint **watchpoint)
1337
{
1338
    target_ulong len_mask = ~(len - 1);
1339
    CPUWatchpoint *wp;
1340

    
1341
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1342
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1343
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1344
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1345
        return -EINVAL;
1346
    }
1347
    wp = qemu_malloc(sizeof(*wp));
1348

    
1349
    wp->vaddr = addr;
1350
    wp->len_mask = len_mask;
1351
    wp->flags = flags;
1352

    
1353
    /* keep all GDB-injected watchpoints in front */
1354
    if (flags & BP_GDB)
1355
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1356
    else
1357
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1358

    
1359
    tlb_flush_page(env, addr);
1360

    
1361
    if (watchpoint)
1362
        *watchpoint = wp;
1363
    return 0;
1364
}
1365

    
1366
/* Remove a specific watchpoint.  */
1367
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1368
                          int flags)
1369
{
1370
    target_ulong len_mask = ~(len - 1);
1371
    CPUWatchpoint *wp;
1372

    
1373
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1374
        if (addr == wp->vaddr && len_mask == wp->len_mask
1375
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1376
            cpu_watchpoint_remove_by_ref(env, wp);
1377
            return 0;
1378
        }
1379
    }
1380
    return -ENOENT;
1381
}
1382

    
1383
/* Remove a specific watchpoint by reference.  */
1384
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1385
{
1386
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1387

    
1388
    tlb_flush_page(env, watchpoint->vaddr);
1389

    
1390
    qemu_free(watchpoint);
1391
}
1392

    
1393
/* Remove all matching watchpoints.  */
1394
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1395
{
1396
    CPUWatchpoint *wp, *next;
1397

    
1398
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1399
        if (wp->flags & mask)
1400
            cpu_watchpoint_remove_by_ref(env, wp);
1401
    }
1402
}
1403

    
1404
/* Add a breakpoint.  */
1405
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1406
                          CPUBreakpoint **breakpoint)
1407
{
1408
#if defined(TARGET_HAS_ICE)
1409
    CPUBreakpoint *bp;
1410

    
1411
    bp = qemu_malloc(sizeof(*bp));
1412

    
1413
    bp->pc = pc;
1414
    bp->flags = flags;
1415

    
1416
    /* keep all GDB-injected breakpoints in front */
1417
    if (flags & BP_GDB)
1418
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1419
    else
1420
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1421

    
1422
    breakpoint_invalidate(env, pc);
1423

    
1424
    if (breakpoint)
1425
        *breakpoint = bp;
1426
    return 0;
1427
#else
1428
    return -ENOSYS;
1429
#endif
1430
}
1431

    
1432
/* Remove a specific breakpoint.  */
1433
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1434
{
1435
#if defined(TARGET_HAS_ICE)
1436
    CPUBreakpoint *bp;
1437

    
1438
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1439
        if (bp->pc == pc && bp->flags == flags) {
1440
            cpu_breakpoint_remove_by_ref(env, bp);
1441
            return 0;
1442
        }
1443
    }
1444
    return -ENOENT;
1445
#else
1446
    return -ENOSYS;
1447
#endif
1448
}
1449

    
1450
/* Remove a specific breakpoint by reference.  */
1451
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1452
{
1453
#if defined(TARGET_HAS_ICE)
1454
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1455

    
1456
    breakpoint_invalidate(env, breakpoint->pc);
1457

    
1458
    qemu_free(breakpoint);
1459
#endif
1460
}
1461

    
1462
/* Remove all matching breakpoints. */
1463
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1464
{
1465
#if defined(TARGET_HAS_ICE)
1466
    CPUBreakpoint *bp, *next;
1467

    
1468
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1469
        if (bp->flags & mask)
1470
            cpu_breakpoint_remove_by_ref(env, bp);
1471
    }
1472
#endif
1473
}
1474

    
1475
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1476
   CPU loop after each instruction */
1477
void cpu_single_step(CPUState *env, int enabled)
1478
{
1479
#if defined(TARGET_HAS_ICE)
1480
    if (env->singlestep_enabled != enabled) {
1481
        env->singlestep_enabled = enabled;
1482
        if (kvm_enabled())
1483
            kvm_update_guest_debug(env, 0);
1484
        else {
1485
            /* must flush all the translated code to avoid inconsistencies */
1486
            /* XXX: only flush what is necessary */
1487
            tb_flush(env);
1488
        }
1489
    }
1490
#endif
1491
}
1492

    
1493
/* enable or disable low-level logging */
1494
void cpu_set_log(int log_flags)
1495
{
1496
    loglevel = log_flags;
1497
    if (loglevel && !logfile) {
1498
        logfile = fopen(logfilename, log_append ? "a" : "w");
1499
        if (!logfile) {
1500
            perror(logfilename);
1501
            _exit(1);
1502
        }
1503
#if !defined(CONFIG_SOFTMMU)
1504
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1505
        {
1506
            static char logfile_buf[4096];
1507
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1508
        }
1509
#elif !defined(_WIN32)
1510
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1511
        setvbuf(logfile, NULL, _IOLBF, 0);
1512
#endif
1513
        log_append = 1;
1514
    }
1515
    if (!loglevel && logfile) {
1516
        fclose(logfile);
1517
        logfile = NULL;
1518
    }
1519
}
1520

    
1521
void cpu_set_log_filename(const char *filename)
1522
{
1523
    logfilename = strdup(filename);
1524
    if (logfile) {
1525
        fclose(logfile);
1526
        logfile = NULL;
1527
    }
1528
    cpu_set_log(loglevel);
1529
}
1530

    
1531
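/* If the CPU is currently executing a TB, unchain it (and every TB reachable
   through its jumps) so control returns to the main loop as soon as possible. */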
static void cpu_unlink_tb(CPUState *env)
1532
{
1533
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1534
       problem and hope the cpu will stop of its own accord.  For userspace
1535
       emulation this often isn't actually as bad as it sounds.  Often
1536
       signals are used primarily to interrupt blocking syscalls.  */
1537
    TranslationBlock *tb;
1538
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1539

    
1540
    tb = env->current_tb;
1541
    /* if the cpu is currently executing code, we must unlink it and
1542
       all the potentially executing TB */
1543
    if (tb) {
1544
        spin_lock(&interrupt_lock);
1545
        env->current_tb = NULL;
1546
        tb_reset_jump_recursive(tb);
1547
        spin_unlock(&interrupt_lock);
1548
    }
1549
}
1550

    
1551
/* mask must never be zero, except for A20 change call */
1552
void cpu_interrupt(CPUState *env, int mask)
1553
{
1554
    int old_mask;
1555

    
1556
    old_mask = env->interrupt_request;
1557
    env->interrupt_request |= mask;
1558

    
1559
#ifndef CONFIG_USER_ONLY
1560
    /*
1561
     * If called from iothread context, wake the target cpu in
1562
     * case it's halted.
1563
     */
1564
    if (!qemu_cpu_self(env)) {
1565
        qemu_cpu_kick(env);
1566
        return;
1567
    }
1568
#endif
1569

    
1570
    if (use_icount) {
1571
        env->icount_decr.u16.high = 0xffff;
1572
#ifndef CONFIG_USER_ONLY
1573
        if (!can_do_io(env)
1574
            && (mask & ~old_mask) != 0) {
1575
            cpu_abort(env, "Raised interrupt while not in I/O function");
1576
        }
1577
#endif
1578
    } else {
1579
        cpu_unlink_tb(env);
1580
    }
1581
}
1582

    
1583
void cpu_reset_interrupt(CPUState *env, int mask)
1584
{
1585
    env->interrupt_request &= ~mask;
1586
}
1587

    
1588
void cpu_exit(CPUState *env)
1589
{
1590
    env->exit_request = 1;
1591
    cpu_unlink_tb(env);
1592
}
1593

    
1594
const CPULogItem cpu_log_items[] = {
1595
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1596
      "show generated host assembly code for each compiled TB" },
1597
    { CPU_LOG_TB_IN_ASM, "in_asm",
1598
      "show target assembly code for each compiled TB" },
1599
    { CPU_LOG_TB_OP, "op",
1600
      "show micro ops for each compiled TB" },
1601
    { CPU_LOG_TB_OP_OPT, "op_opt",
1602
      "show micro ops "
1603
#ifdef TARGET_I386
1604
      "before eflags optimization and "
1605
#endif
1606
      "after liveness analysis" },
1607
    { CPU_LOG_INT, "int",
1608
      "show interrupts/exceptions in short format" },
1609
    { CPU_LOG_EXEC, "exec",
1610
      "show trace before each executed TB (lots of logs)" },
1611
    { CPU_LOG_TB_CPU, "cpu",
1612
      "show CPU state before block translation" },
1613
#ifdef TARGET_I386
1614
    { CPU_LOG_PCALL, "pcall",
1615
      "show protected mode far calls/returns/exceptions" },
1616
    { CPU_LOG_RESET, "cpu_reset",
1617
      "show CPU state before CPU resets" },
1618
#endif
1619
#ifdef DEBUG_IOPORT
1620
    { CPU_LOG_IOPORT, "ioport",
1621
      "show all i/o ports accesses" },
1622
#endif
1623
    { 0, NULL, NULL },
1624
};
1625

    
1626
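/* Return non-zero if the first 'n' characters of 's1' exactly match 's2'. */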
static int cmp1(const char *s1, int n, const char *s2)
1627
{
1628
    if (strlen(s2) != n)
1629
        return 0;
1630
    return memcmp(s1, s2, n) == 0;
1631
}
1632

    
1633
/* Takes a comma-separated list of log masks. Returns 0 on error. */
1634
int cpu_str_to_log_mask(const char *str)
1635
{
1636
    const CPULogItem *item;
1637
    int mask;
1638
    const char *p, *p1;
1639

    
1640
    p = str;
1641
    mask = 0;
1642
    for(;;) {
1643
        p1 = strchr(p, ',');
1644
        if (!p1)
1645
            p1 = p + strlen(p);
1646
        if(cmp1(p,p1-p,"all")) {
1647
                for(item = cpu_log_items; item->mask != 0; item++) {
1648
                        mask |= item->mask;
1649
                }
1650
        } else {
1651
        for(item = cpu_log_items; item->mask != 0; item++) {
1652
            if (cmp1(p, p1 - p, item->name))
1653
                goto found;
1654
        }
1655
        return 0;
1656
        }
1657
    found:
1658
        mask |= item->mask;
1659
        if (*p1 != ',')
1660
            break;
1661
        p = p1 + 1;
1662
    }
1663
    return mask;
1664
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page.  */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}
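
/* Illustrative note (not in the original file): target MMU code is expected
   to call these helpers when the guest invalidates translations, e.g. a
   hypothetical handler for a single-page TLB invalidate instruction:

       tlb_flush_page(env, vaddr);   // drop one guest page
       // or, on a full MMU context switch:
       tlb_flush(env, 1);
*/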

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_get_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    in_migration = enable;
    if (kvm_enabled()) {
        return kvm_set_migration_log(enable);
    }
    return 0;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret = 0;

    if (kvm_enabled())
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}
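
/* Usage sketch (illustrative): live migration code is expected to enable
   dirty tracking and then periodically pull the dirty log, roughly:

       cpu_physical_memory_set_dirty_tracking(1);
       ...
       cpu_physical_sync_dirty_bitmap(0, last_byte_of_ram);

   The actual caller and range handling live in the migration code, not in
   this file; the names above are placeholders. */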

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM.  */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            iotlb = io_mem_watch + paddr;
            /* TODO: The memory case can be optimized by not trapping
               reads of pages with a write breakpoint.  */
            address |= TLB_MMIO;
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
    return ret;
}
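
/* Illustrative note (not in the original file): on a TLB hit the softmmu
   fast path turns a guest virtual address into a host pointer with the
   addend stored above, conceptually:

       uint8_t *host = (uint8_t *)(unsigned long)(vaddr + te->addend);

   while MMIO, ROM and not-dirty pages keep TLB_MMIO/TLB_NOTDIRTY set in the
   addr_* fields so the access falls back to the slow I/O path instead. */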

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */
int walk_memory_regions(void *priv,
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
{
    unsigned long start, end;
    PageDesc *p = NULL;
    int i, j, prot, prot1;
    int rc = 0;

    start = end = -1;
    prot = 0;

    for (i = 0; i <= L1_SIZE; i++) {
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
        for (j = 0; j < L2_SIZE; j++) {
            prot1 = (p == NULL) ? 0 : p[j].flags;
            /*
             * A "region" is one contiguous chunk of memory
             * that has the same protection flags set.
             */
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    rc = (*fn)(priv, start, end, prot);
                    /* callback can stop iteration by returning != 0 */
                    if (rc != 0)
                        return (rc);
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (p == NULL)
                break;
        }
    }
    return (rc);
}

static int dump_region(void *priv, unsigned long start,
    unsigned long end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
        start, end, end - start,
        ((prot & PAGE_READ) ? 'r' : '-'),
        ((prot & PAGE_WRITE) ? 'w' : '-'),
        ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    /* mmap_lock should already be held.  */
    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* We may be called for host regions that are outside guest
           address space.  */
        if (!p)
            return;
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
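
/* Usage sketch (illustrative): the user-mode mmap emulation is expected to
   record a freshly mapped guest region roughly like this:

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);
*/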

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    if (start + len < start)
        /* we've wrapped around */
        return -1;

    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if( !p )
            return -1;
        if( !(p->flags & PAGE_VALID) )
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}
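
/* Usage sketch (illustrative): syscall emulation can validate a guest buffer
   before touching it, e.g.

       if (page_check_range(guest_addr, size, PAGE_WRITE) < 0)
           return -TARGET_EFAULT;   // TARGET_EFAULT assumed from linux-user
*/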

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1) {
        mmap_unlock();
        return 0;
    }
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            mmap_unlock();
            return 1;
        }
    }
    mmap_unlock();
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.  */
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

    if (kvm_enabled())
        kvm_set_phys_mem(start_addr, size, phys_offset);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_malloc(sizeof(*new_block));

#if defined(TARGET_S390X) && defined(CONFIG_KVM)
    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
    new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
    madvise(new_block->host, size, MADV_MERGEABLE);
#endif
    new_block->offset = last_ram_offset;
    new_block->length = size;

    new_block->next = ram_blocks;
    ram_blocks = new_block;

    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    last_ram_offset += size;

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
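
/* Usage sketch (illustrative): board code typically allocates guest RAM and
   then maps it into the physical address space, roughly:

       ram_addr_t ram_off = qemu_ram_alloc(ram_size);
       cpu_register_physical_memory(0, ram_size, ram_off | IO_MEM_RAM);

   cpu_register_physical_memory() is assumed to be the region_offset == 0
   wrapper around cpu_register_physical_memory_offset() declared elsewhere. */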

void qemu_ram_free(ram_addr_t addr)
{
    /* TODO: implement this.  */
}

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->offset > addr
                     || block->offset + block->length <= addr)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
    /* Move this entry to the start of the list.  */
    if (prev) {
        prev->next = block->next;
        block->next = *prevp;
        *prevp = block;
    }
    return block->host + (addr - block->offset);
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset.  */
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
    RAMBlock *prev;
    RAMBlock **prevp;
    RAMBlock *block;
    uint8_t *host = ptr;

    prev = NULL;
    prevp = &ram_blocks;
    block = ram_blocks;
    while (block && (block->host > host
                     || block->host + block->length <= host)) {
        if (prev)
          prevp = &prev->next;
        prev = block;
        block = block->next;
    }
    if (!block) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return block->offset + (host - block->host);
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
                                       addr + mmio->region_offset[idx][0][len]);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
                                  addr + mmio->region_offset[idx][1][len],
                                  value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][0][i] = region_offset;
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
                mmio->region_offset[idx][1][i] = region_offset;
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory, ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
                         region_offset);

    return mmio;
}

static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "RAN out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned if error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0;i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
}
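
/* Usage sketch (illustrative): a device model registers byte/word/long
   callbacks and then maps them at a physical address, along the lines of:

       static CPUReadMemoryFunc * const mydev_read[3] = {
           mydev_readb, mydev_readw, mydev_readl,   // hypothetical handlers
       };
       static CPUWriteMemoryFunc * const mydev_write[3] = {
           mydev_writeb, mydev_writew, mydev_writel,
       };
       int io = cpu_register_io_memory(mydev_read, mydev_write, s);
       cpu_register_physical_memory(base, 0x1000, io);
*/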

void cpu_unregister_io_memory(int io_table_address)
{
    int i;
    int io_index = io_table_address >> IO_MEM_SHIFT;

    for (i=0;i < 3; i++) {
        io_mem_read[io_index][i] = unassigned_mem_read[i];
        io_mem_write[io_index][i] = unassigned_mem_write[i];
    }
    io_mem_opaque[io_index] = NULL;
    io_mem_used[io_index] = 0;
}

static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL);
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
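
/* Usage sketch (illustrative): most callers are expected to go through the
   read/write wrappers assumed to be provided by cpu-common.h, e.g.

       uint8_t buf[4];
       cpu_physical_memory_read(desc_addr, buf, sizeof(buf));
       cpu_physical_memory_write(desc_addr, buf, sizeof(buf));

   which simply call cpu_physical_memory_rw() with is_write 0 and 1. */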

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
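
/* Illustrative sketch only: how a hypothetical device model might defer a
   DMA transfer until the single bounce buffer above is released.  The
   ExampleDMAState structure and both example_* functions are assumptions
   for the example, not QEMU code. */
typedef struct {
    target_phys_addr_t addr;
    target_phys_addr_t len;
    int pending;
} ExampleDMAState;

static void example_dma_retry(void *opaque)
{
    ExampleDMAState *s = opaque;

    /* cpu_notify_map_clients() runs this once the bounce buffer becomes
       free; the client entry is unregistered automatically afterwards. */
    s->pending = 0;
    /* ...restart the transfer here, e.g. by re-entering the mapping path... */
}

static void example_dma_defer(ExampleDMAState *s)
{
    s->pending = 1;
    cpu_register_map_client(s, example_dma_retry);
}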

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
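
/* Illustrative sketch only: the intended map/unmap pairing for a
   hypothetical device reading a guest buffer, falling back to
   cpu_physical_memory_read() when only part of the range (or nothing)
   could be mapped.  example_read_guest_buffer() is not a QEMU function. */
static void example_read_guest_buffer(target_phys_addr_t addr,
                                      uint8_t *dest, target_phys_addr_t len)
{
    target_phys_addr_t plen = len;
    void *ptr = cpu_physical_memory_map(addr, &plen, 0 /* is_write */);

    if (ptr && plen == len) {
        /* Zero-copy path: read directly from guest RAM (or the bounce
           buffer), then release the mapping; access_len == plen because
           the whole range was consumed. */
        memcpy(dest, ptr, plen);
        cpu_physical_memory_unmap(ptr, plen, 0, plen);
    } else {
        if (ptr) {
            /* Partial mapping: hand it back untouched before falling back. */
            cpu_physical_memory_unmap(ptr, plen, 0, 0);
        }
        cpu_physical_memory_read(addr, dest, (int)len);
    }
}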

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}
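
/* Illustrative sketch only: the kind of caller the comment above has in
   mind - an MMU walk setting the accessed bit in a 32-bit guest PTE
   without the store itself flagging the page-table page as modified.
   example_set_pte_accessed() is hypothetical; 0x20 happens to be the x86
   accessed bit and is used here purely for illustration. */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & 0x20)) {
        stl_phys_notdirty(pte_addr, pte | 0x20);
    }
}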

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
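
/* Illustrative sketch only: how a debugger front end (e.g. a gdb stub)
   could use cpu_memory_rw_debug() to read a NUL-terminated string from a
   guest virtual address.  example_read_guest_string() is hypothetical. */
static int example_read_guest_string(CPUState *env, target_ulong vaddr,
                                     char *out, int max_len)
{
    int i;

    for (i = 0; i < max_len - 1; i++) {
        uint8_t c;
        /* walks the guest page tables via cpu_get_phys_page_debug() and
           fails cleanly on unmapped pages instead of faulting the guest */
        if (cpu_memory_rw_debug(env, vaddr + i, &c, 1, 0) != 0) {
            return -1;
        }
        out[i] = c;
        if (c == '\0') {
            return i;
        }
    }
    out[i] = '\0';
    return i;
}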

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif