exec.c @ 99a0949b
1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18
 */
19
#include "config.h"
20
#ifdef _WIN32
21
#include <windows.h>
22
#else
23
#include <sys/types.h>
24
#include <sys/mman.h>
25
#endif
26
#include <stdlib.h>
27
#include <stdio.h>
28
#include <stdarg.h>
29
#include <string.h>
30
#include <errno.h>
31
#include <unistd.h>
32
#include <inttypes.h>
33

    
34
#include "cpu.h"
35
#include "exec-all.h"
36
#include "qemu-common.h"
37
#include "tcg.h"
38
#include "hw/hw.h"
39
#include "osdep.h"
40
#include "kvm.h"
41
#if defined(CONFIG_USER_ONLY)
42
#include <qemu.h>
43
#endif
44

    
45
//#define DEBUG_TB_INVALIDATE
46
//#define DEBUG_FLUSH
47
//#define DEBUG_TLB
48
//#define DEBUG_UNASSIGNED
49

    
50
/* make various TB consistency checks */
51
//#define DEBUG_TB_CHECK
52
//#define DEBUG_TLB_CHECK
53

    
54
//#define DEBUG_IOPORT
55
//#define DEBUG_SUBPAGE
56

    
57
#if !defined(CONFIG_USER_ONLY)
58
/* TB consistency checks only implemented for usermode emulation.  */
59
#undef DEBUG_TB_CHECK
60
#endif
61

    
62
#define SMC_BITMAP_USE_THRESHOLD 10
63

    
64
#if defined(TARGET_SPARC64)
65
#define TARGET_PHYS_ADDR_SPACE_BITS 41
66
#elif defined(TARGET_SPARC)
67
#define TARGET_PHYS_ADDR_SPACE_BITS 36
68
#elif defined(TARGET_ALPHA)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 42
70
#define TARGET_VIRT_ADDR_SPACE_BITS 42
71
#elif defined(TARGET_PPC64)
72
#define TARGET_PHYS_ADDR_SPACE_BITS 42
73
#elif defined(TARGET_X86_64)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_I386)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 36
77
#else
78
#define TARGET_PHYS_ADDR_SPACE_BITS 32
79
#endif
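/* NOTE: the per-target widths above bound the simulated physical address
   space; they decide whether phys_page_find_alloc() needs an extra table
   level and clamp the addresses parsed from /proc/self/maps in page_init(). */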
80

    
81
static TranslationBlock *tbs;
82
int code_gen_max_blocks;
83
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
84
static int nb_tbs;
85
/* any access to the tbs or the page table must use this lock */
86
a_spinlock tb_lock = SPIN_LOCK_UNLOCKED;
87

    
88
#if defined(__arm__) || defined(__sparc_v9__)
89
/* The prologue must be reachable with a direct jump. ARM and Sparc64
90
 have limited branch ranges (possibly also PPC) so place it in a
91
 section close to the code segment. */
92
#define code_gen_section                                \
93
    __attribute__((__section__(".gen_code")))           \
94
    __attribute__((aligned (32)))
95
#elif defined(_WIN32)
96
/* Maximum alignment for Win32 is 16. */
97
#define code_gen_section                                \
98
    __attribute__((aligned (16)))
99
#else
100
#define code_gen_section                                \
101
    __attribute__((aligned (32)))
102
#endif
103

    
104
uint8_t code_gen_prologue[1024] code_gen_section;
105
static uint8_t *code_gen_buffer;
106
static unsigned long code_gen_buffer_size;
107
/* threshold to flush the translated code buffer */
108
static unsigned long code_gen_buffer_max_size;
109
uint8_t *code_gen_ptr;
110

    
111
#if !defined(CONFIG_USER_ONLY)
112
int phys_ram_fd;
113
uint8_t *phys_ram_dirty;
114
static int in_migration;
115

    
116
typedef struct RAMBlock {
117
    uint8_t *host;
118
    a_ram_addr offset;
119
    a_ram_addr length;
120
    struct RAMBlock *next;
121
} RAMBlock;
122

    
123
static RAMBlock *ram_blocks;
124
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
125
   then we can no longer assume contiguous ram offsets, and external uses
126
   of this variable will break.  */
127
a_ram_addr last_ram_offset;
128
#endif
129

    
130
CPUState *first_cpu;
131
/* current CPU in the current thread. It is only valid inside
132
   cpu_exec() */
133
CPUState *cpu_single_env;
134
/* 0 = Do not count executed instructions.
135
   1 = Precise instruction counting.
136
   2 = Adaptive rate instruction counting.  */
137
int use_icount = 0;
138
/* Current instruction counter.  While executing translated code this may
139
   include some instructions that have not yet been executed.  */
140
int64_t qemu_icount;
141

    
142
typedef struct PageDesc {
143
    /* list of TBs intersecting this ram page */
144
    TranslationBlock *first_tb;
145
    /* in order to optimize self modifying code, we count the number
146
       of lookups we do to a given page to use a bitmap */
147
    unsigned int code_write_count;
148
    uint8_t *code_bitmap;
149
#if defined(CONFIG_USER_ONLY)
150
    unsigned long flags;
151
#endif
152
} PageDesc;
153

    
154
typedef struct PhysPageDesc {
155
    /* offset in host memory of the page + io_index in the low bits */
156
    a_ram_addr phys_offset;
157
    a_ram_addr region_offset;
158
} PhysPageDesc;
159

    
160
#define L2_BITS 10
161
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
162
/* XXX: this is a temporary hack for alpha target.
163
 *      In the future, this is to be replaced by a multi-level table
164
 *      to actually be able to handle the complete 64 bits address space.
165
 */
166
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
167
#else
168
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
169
#endif
170

    
171
#define L1_SIZE (1 << L1_BITS)
172
#define L2_SIZE (1 << L2_BITS)
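/* A page index is split across two levels:
 *   l1_map[index >> L2_BITS]     selects an array of L2_SIZE PageDesc entries
 *   index & (L2_SIZE - 1)        selects the entry for one target page
 * (see page_l1_map() and page_find_alloc() below). */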
173

    
174
unsigned long qemu_real_host_page_size;
175
unsigned long qemu_host_page_bits;
176
unsigned long qemu_host_page_size;
177
unsigned long qemu_host_page_mask;
178

    
179
/* XXX: for system emulation, it could just be an array */
180
static PageDesc *l1_map[L1_SIZE];
181
static PhysPageDesc **l1_phys_map;
182

    
183
#if !defined(CONFIG_USER_ONLY)
184
static void io_mem_init(void);
185

    
186
/* io memory support */
187
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
188
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
189
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
190
static char io_mem_used[IO_MEM_NB_ENTRIES];
191
static int io_mem_watch;
192
#endif
193

    
194
/* log support */
195
static const char *logfilename = "/tmp/qemu.log";
196
FILE *logfile;
197
int loglevel;
198
static int log_append = 0;
199

    
200
/* statistics */
201
static int tlb_flush_count;
202
static int tb_flush_count;
203
static int tb_phys_invalidate_count;
204

    
205
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
206
typedef struct subpage {
207
    a_target_phys_addr base;
208
    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
209
    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
210
    void *opaque[TARGET_PAGE_SIZE][2][4];
211
    a_ram_addr region_offset[TARGET_PAGE_SIZE][2][4];
212
} a_subpage;
213

    
214
#ifdef _WIN32
215
static void map_exec(void *addr, long size)
216
{
217
    DWORD old_protect;
218
    VirtualProtect(addr, size,
219
                   PAGE_EXECUTE_READWRITE, &old_protect);
220
    
221
}
222
#else
223
static void map_exec(void *addr, long size)
224
{
225
    unsigned long start, end, page_size;
226
    
227
    page_size = getpagesize();
228
    start = (unsigned long)addr;
229
    start &= ~(page_size - 1);
230
    
231
    end = (unsigned long)addr + size;
232
    end += page_size - 1;
233
    end &= ~(page_size - 1);
234
    
235
    mprotect((void *)start, end - start,
236
             PROT_READ | PROT_WRITE | PROT_EXEC);
237
}
238
#endif
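/* map_exec() makes a region executable: VirtualProtect() with
   PAGE_EXECUTE_READWRITE on Win32, otherwise mprotect(PROT_READ | PROT_WRITE |
   PROT_EXEC) over the enclosing host pages.  It is used below for the code
   generation buffer and the generated-code prologue. */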
239

    
240
static void page_init(void)
241
{
242
    /* NOTE: we can always suppose that qemu_host_page_size >=
243
       TARGET_PAGE_SIZE */
244
#ifdef _WIN32
245
    {
246
        SYSTEM_INFO system_info;
247

    
248
        GetSystemInfo(&system_info);
249
        qemu_real_host_page_size = system_info.dwPageSize;
250
    }
251
#else
252
    qemu_real_host_page_size = getpagesize();
253
#endif
254
    if (qemu_host_page_size == 0)
255
        qemu_host_page_size = qemu_real_host_page_size;
256
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
257
        qemu_host_page_size = TARGET_PAGE_SIZE;
258
    qemu_host_page_bits = 0;
259
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
260
        qemu_host_page_bits++;
261
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
262
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
263
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
264

    
265
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
266
    {
267
        long long startaddr, endaddr;
268
        FILE *f;
269
        int n;
270

    
271
        mmap_lock();
272
        last_brk = (unsigned long)sbrk(0);
273
        f = fopen("/proc/self/maps", "r");
274
        if (f) {
275
            do {
276
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
277
                if (n == 2) {
278
                    startaddr = MIN(startaddr,
279
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280
                    endaddr = MIN(endaddr,
281
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
282
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
283
                                   TARGET_PAGE_ALIGN(endaddr),
284
                                   PAGE_RESERVED); 
285
                }
286
            } while (!feof(f));
287
            fclose(f);
288
        }
289
        mmap_unlock();
290
    }
291
#endif
292
}
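/* page_init() determines the host page size, allocates the top level of the
   physical page table (l1_phys_map) and, for user-mode emulation, marks every
   region already mapped in the host process (parsed from /proc/self/maps) as
   PAGE_RESERVED. */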
293

    
294
static inline PageDesc **page_l1_map(target_ulong index)
295
{
296
#if TARGET_LONG_BITS > 32
297
    /* Host memory outside guest VM.  For 32-bit targets we have already
298
       excluded high addresses.  */
299
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
300
        return NULL;
301
#endif
302
    return &l1_map[index >> L2_BITS];
303
}
304

    
305
static inline PageDesc *page_find_alloc(target_ulong index)
306
{
307
    PageDesc **lp, *p;
308
    lp = page_l1_map(index);
309
    if (!lp)
310
        return NULL;
311

    
312
    p = *lp;
313
    if (!p) {
314
        /* allocate if not found */
315
#if defined(CONFIG_USER_ONLY)
316
        size_t len = sizeof(PageDesc) * L2_SIZE;
317
        /* Don't use qemu_malloc because it may recurse.  */
318
        p = mmap(NULL, len, PROT_READ | PROT_WRITE,
319
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
320
        *lp = p;
321
        if (h2g_valid(p)) {
322
            unsigned long addr = h2g(p);
323
            page_set_flags(addr & TARGET_PAGE_MASK,
324
                           TARGET_PAGE_ALIGN(addr + len),
325
                           PAGE_RESERVED); 
326
        }
327
#else
328
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
329
        *lp = p;
330
#endif
331
    }
332
    return p + (index & (L2_SIZE - 1));
333
}
334

    
335
static inline PageDesc *page_find(target_ulong index)
336
{
337
    PageDesc **lp, *p;
338
    lp = page_l1_map(index);
339
    if (!lp)
340
        return NULL;
341

    
342
    p = *lp;
343
    if (!p) {
344
        return NULL;
345
    }
346
    return p + (index & (L2_SIZE - 1));
347
}
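/* page_find() is the non-allocating variant of page_find_alloc(): both walk
   the two-level l1_map using the index split described above, but page_find()
   returns NULL instead of creating a missing second-level table. */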
348

    
349
static PhysPageDesc *phys_page_find_alloc(a_target_phys_addr index, int alloc)
350
{
351
    void **lp, **p;
352
    PhysPageDesc *pd;
353

    
354
    p = (void **)l1_phys_map;
355
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
356

    
357
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
358
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
359
#endif
360
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
361
    p = *lp;
362
    if (!p) {
363
        /* allocate if not found */
364
        if (!alloc)
365
            return NULL;
366
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
367
        memset(p, 0, sizeof(void *) * L1_SIZE);
368
        *lp = p;
369
    }
370
#endif
371
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
372
    pd = *lp;
373
    if (!pd) {
374
        int i;
375
        /* allocate if not found */
376
        if (!alloc)
377
            return NULL;
378
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
379
        *lp = pd;
380
        for (i = 0; i < L2_SIZE; i++) {
381
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
382
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
383
        }
384
    }
385
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
386
}
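/* phys_page_find_alloc() mirrors page_find_alloc() for physical pages: when
   TARGET_PHYS_ADDR_SPACE_BITS > 32 an extra table level is used, and freshly
   allocated leaves start out as IO_MEM_UNASSIGNED with a default
   region_offset. */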
387

    
388
static inline PhysPageDesc *phys_page_find(a_target_phys_addr index)
389
{
390
    return phys_page_find_alloc(index, 0);
391
}
392

    
393
#if !defined(CONFIG_USER_ONLY)
394
static void tlb_protect_code(a_ram_addr ram_addr);
395
static void tlb_unprotect_code_phys(CPUState *env, a_ram_addr ram_addr,
396
                                    target_ulong vaddr);
397
#define mmap_lock() do { } while(0)
398
#define mmap_unlock() do { } while(0)
399
#endif
400

    
401
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
402

    
403
#if defined(CONFIG_USER_ONLY)
404
/* Currently it is not recommended to allocate big chunks of data in
405
   user mode. It will change when a dedicated libc will be used */
406
#define USE_STATIC_CODE_GEN_BUFFER
407
#endif
408

    
409
#ifdef USE_STATIC_CODE_GEN_BUFFER
410
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
411
#endif
412

    
413
static void code_gen_alloc(unsigned long tb_size)
414
{
415
#ifdef USE_STATIC_CODE_GEN_BUFFER
416
    code_gen_buffer = static_code_gen_buffer;
417
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
418
    map_exec(code_gen_buffer, code_gen_buffer_size);
419
#else
420
    code_gen_buffer_size = tb_size;
421
    if (code_gen_buffer_size == 0) {
422
#if defined(CONFIG_USER_ONLY)
423
        /* in user mode, phys_ram_size is not meaningful */
424
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
425
#else
426
        /* XXX: needs adjustments */
427
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
428
#endif
429
    }
430
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
431
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
432
    /* The code gen buffer location may have constraints depending on
433
       the host cpu and OS */
434
#if defined(__linux__) 
435
    {
436
        int flags;
437
        void *start = NULL;
438

    
439
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
440
#if defined(__x86_64__)
441
        flags |= MAP_32BIT;
442
        /* Cannot map more than that */
443
        if (code_gen_buffer_size > (800 * 1024 * 1024))
444
            code_gen_buffer_size = (800 * 1024 * 1024);
445
#elif defined(__sparc_v9__)
446
        // Map the buffer below 2G, so we can use direct calls and branches
447
        flags |= MAP_FIXED;
448
        start = (void *) 0x60000000UL;
449
        if (code_gen_buffer_size > (512 * 1024 * 1024))
450
            code_gen_buffer_size = (512 * 1024 * 1024);
451
#elif defined(__arm__)
452
        /* Map the buffer below 32M, so we can use direct calls and branches */
453
        flags |= MAP_FIXED;
454
        start = (void *) 0x01000000UL;
455
        if (code_gen_buffer_size > 16 * 1024 * 1024)
456
            code_gen_buffer_size = 16 * 1024 * 1024;
457
#endif
458
        code_gen_buffer = mmap(start, code_gen_buffer_size,
459
                               PROT_WRITE | PROT_READ | PROT_EXEC,
460
                               flags, -1, 0);
461
        if (code_gen_buffer == MAP_FAILED) {
462
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
463
            exit(1);
464
        }
465
    }
466
#elif defined(__FreeBSD__) || defined(__DragonFly__)
467
    {
468
        int flags;
469
        void *addr = NULL;
470
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
471
#if defined(__x86_64__)
472
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
473
         * 0x40000000 is free */
474
        flags |= MAP_FIXED;
475
        addr = (void *)0x40000000;
476
        /* Cannot map more than that */
477
        if (code_gen_buffer_size > (800 * 1024 * 1024))
478
            code_gen_buffer_size = (800 * 1024 * 1024);
479
#endif
480
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
481
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
482
                               flags, -1, 0);
483
        if (code_gen_buffer == MAP_FAILED) {
484
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
485
            exit(1);
486
        }
487
    }
488
#else
489
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
490
    map_exec(code_gen_buffer, code_gen_buffer_size);
491
#endif
492
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
493
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
494
    code_gen_buffer_max_size = code_gen_buffer_size - 
495
        code_gen_max_block_size();
496
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
497
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
498
}
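/* Summary: with USE_STATIC_CODE_GEN_BUFFER the translation buffer is a static
   array, otherwise it is mmap()ed (or qemu_malloc()ed) with per-host placement
   constraints so generated code can reach the prologue with direct branches.
   code_gen_buffer_max_size keeps one maximal block of headroom and tbs[] is
   sized from the average block size. */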
499

    
500
/* Must be called before using the QEMU cpus. 'tb_size' is the size
501
   (in bytes) allocated to the translation buffer. Zero means default
502
   size. */
503
void cpu_exec_init_all(unsigned long tb_size)
504
{
505
    cpu_gen_init();
506
    code_gen_alloc(tb_size);
507
    code_gen_ptr = code_gen_buffer;
508
    page_init();
509
#if !defined(CONFIG_USER_ONLY)
510
    io_mem_init();
511
#endif
512
}
513

    
514
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
515

    
516
static void cpu_common_pre_save(const void *opaque)
517
{
518
    CPUState *env = (void *)opaque;
519

    
520
    cpu_synchronize_state(env);
521
}
522

    
523
static int cpu_common_pre_load(void *opaque)
524
{
525
    CPUState *env = opaque;
526

    
527
    cpu_synchronize_state(env);
528
    return 0;
529
}
530

    
531
static int cpu_common_post_load(void *opaque)
532
{
533
    CPUState *env = opaque;
534

    
535
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
536
       version_id is increased. */
537
    env->interrupt_request &= ~0x01;
538
    tlb_flush(env, 1);
539

    
540
    return 0;
541
}
542

    
543
static const VMStateDescription vmstate_cpu_common = {
544
    .name = "cpu_common",
545
    .version_id = 1,
546
    .minimum_version_id = 1,
547
    .minimum_version_id_old = 1,
548
    .pre_save = cpu_common_pre_save,
549
    .pre_load = cpu_common_pre_load,
550
    .post_load = cpu_common_post_load,
551
    .fields      = (VMStateField []) {
552
        VMSTATE_UINT32(halted, CPUState),
553
        VMSTATE_UINT32(interrupt_request, CPUState),
554
        VMSTATE_END_OF_LIST()
555
    }
556
};
557
#endif
558

    
559
CPUState *qemu_get_cpu(int cpu)
560
{
561
    CPUState *env = first_cpu;
562

    
563
    while (env) {
564
        if (env->cpu_index == cpu)
565
            break;
566
        env = env->next_cpu;
567
    }
568

    
569
    return env;
570
}
571

    
572
void cpu_exec_init(CPUState *env)
573
{
574
    CPUState **penv;
575
    int cpu_index;
576

    
577
#if defined(CONFIG_USER_ONLY)
578
    cpu_list_lock();
579
#endif
580
    env->next_cpu = NULL;
581
    penv = &first_cpu;
582
    cpu_index = 0;
583
    while (*penv != NULL) {
584
        penv = &(*penv)->next_cpu;
585
        cpu_index++;
586
    }
587
    env->cpu_index = cpu_index;
588
    env->numa_node = 0;
589
    QTAILQ_INIT(&env->breakpoints);
590
    QTAILQ_INIT(&env->watchpoints);
591
    *penv = env;
592
#if defined(CONFIG_USER_ONLY)
593
    cpu_list_unlock();
594
#endif
595
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
596
    vmstate_register(cpu_index, &vmstate_cpu_common, env);
597
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
598
                    cpu_save, cpu_load, env);
599
#endif
600
}
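/* cpu_exec_init() appends the new CPUState to the first_cpu list, assigns it
   the next free cpu_index and, for system emulation, registers the common and
   target-specific save/load handlers. */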
601

    
602
static inline void invalidate_page_bitmap(PageDesc *p)
603
{
604
    if (p->code_bitmap) {
605
        qemu_free(p->code_bitmap);
606
        p->code_bitmap = NULL;
607
    }
608
    p->code_write_count = 0;
609
}
610

    
611
/* set to NULL all the 'first_tb' fields in all PageDescs */
612
static void page_flush_tb(void)
613
{
614
    int i, j;
615
    PageDesc *p;
616

    
617
    for(i = 0; i < L1_SIZE; i++) {
618
        p = l1_map[i];
619
        if (p) {
620
            for(j = 0; j < L2_SIZE; j++) {
621
                p->first_tb = NULL;
622
                invalidate_page_bitmap(p);
623
                p++;
624
            }
625
        }
626
    }
627
}
628

    
629
/* flush all the translation blocks */
630
/* XXX: tb_flush is currently not thread safe */
631
void tb_flush(CPUState *env1)
632
{
633
    CPUState *env;
634
#if defined(DEBUG_FLUSH)
635
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
636
           (unsigned long)(code_gen_ptr - code_gen_buffer),
637
           nb_tbs, nb_tbs > 0 ?
638
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
639
#endif
640
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
641
        cpu_abort(env1, "Internal error: code buffer overflow\n");
642

    
643
    nb_tbs = 0;
644

    
645
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
646
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
647
    }
648

    
649
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
650
    page_flush_tb();
651

    
652
    code_gen_ptr = code_gen_buffer;
653
    /* XXX: flush processor icache at this point if cache flush is
654
       expensive */
655
    tb_flush_count++;
656
}
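/* After tb_flush() no translation survives: nb_tbs is reset, every CPU's
   tb_jmp_cache and the global tb_phys_hash are cleared, page_flush_tb() drops
   the per-page TB lists and code_gen_ptr rewinds to the start of the buffer. */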
657

    
658
#ifdef DEBUG_TB_CHECK
659

    
660
static void tb_invalidate_check(target_ulong address)
661
{
662
    TranslationBlock *tb;
663
    int i;
664
    address &= TARGET_PAGE_MASK;
665
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
666
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
667
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
668
                  address >= tb->pc + tb->size)) {
669
                printf("ERROR invalidate: address=" TARGET_FMT_lx
670
                       " PC=%08lx size=%04x\n",
671
                       address, (long)tb->pc, tb->size);
672
            }
673
        }
674
    }
675
}
676

    
677
/* verify that all the pages have correct rights for code */
678
static void tb_page_check(void)
679
{
680
    TranslationBlock *tb;
681
    int i, flags1, flags2;
682

    
683
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
684
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
685
            flags1 = page_get_flags(tb->pc);
686
            flags2 = page_get_flags(tb->pc + tb->size - 1);
687
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
688
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
689
                       (long)tb->pc, tb->size, flags1, flags2);
690
            }
691
        }
692
    }
693
}
694

    
695
#endif
696

    
697
/* invalidate one TB */
698
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
699
                             int next_offset)
700
{
701
    TranslationBlock *tb1;
702
    for(;;) {
703
        tb1 = *ptb;
704
        if (tb1 == tb) {
705
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
706
            break;
707
        }
708
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
709
    }
710
}
711

    
712
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
713
{
714
    TranslationBlock *tb1;
715
    unsigned int n1;
716

    
717
    for(;;) {
718
        tb1 = *ptb;
719
        n1 = (long)tb1 & 3;
720
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
721
        if (tb1 == tb) {
722
            *ptb = tb1->page_next[n1];
723
            break;
724
        }
725
        ptb = &tb1->page_next[n1];
726
    }
727
}
728

    
729
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
730
{
731
    TranslationBlock *tb1, **ptb;
732
    unsigned int n1;
733

    
734
    ptb = &tb->jmp_next[n];
735
    tb1 = *ptb;
736
    if (tb1) {
737
        /* find tb(n) in circular list */
738
        for(;;) {
739
            tb1 = *ptb;
740
            n1 = (long)tb1 & 3;
741
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
742
            if (n1 == n && tb1 == tb)
743
                break;
744
            if (n1 == 2) {
745
                ptb = &tb1->jmp_first;
746
            } else {
747
                ptb = &tb1->jmp_next[n1];
748
            }
749
        }
750
        /* now we can suppress tb(n) from the list */
751
        *ptb = tb->jmp_next[n];
752

    
753
        tb->jmp_next[n] = NULL;
754
    }
755
}
756

    
757
/* reset the jump entry 'n' of a TB so that it is not chained to
758
   another TB */
759
static inline void tb_reset_jump(TranslationBlock *tb, int n)
760
{
761
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
762
}
763

    
764
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
765
{
766
    CPUState *env;
767
    PageDesc *p;
768
    unsigned int h, n1;
769
    a_target_phys_addr phys_pc;
770
    TranslationBlock *tb1, *tb2;
771

    
772
    /* remove the TB from the hash list */
773
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
774
    h = tb_phys_hash_func(phys_pc);
775
    tb_remove(&tb_phys_hash[h], tb,
776
              offsetof(TranslationBlock, phys_hash_next));
777

    
778
    /* remove the TB from the page list */
779
    if (tb->page_addr[0] != page_addr) {
780
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
781
        tb_page_remove(&p->first_tb, tb);
782
        invalidate_page_bitmap(p);
783
    }
784
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
785
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
786
        tb_page_remove(&p->first_tb, tb);
787
        invalidate_page_bitmap(p);
788
    }
789

    
790
    tb_invalidated_flag = 1;
791

    
792
    /* remove the TB from the hash list */
793
    h = tb_jmp_cache_hash_func(tb->pc);
794
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
795
        if (env->tb_jmp_cache[h] == tb)
796
            env->tb_jmp_cache[h] = NULL;
797
    }
798

    
799
    /* suppress this TB from the two jump lists */
800
    tb_jmp_remove(tb, 0);
801
    tb_jmp_remove(tb, 1);
802

    
803
    /* suppress any remaining jumps to this TB */
804
    tb1 = tb->jmp_first;
805
    for(;;) {
806
        n1 = (long)tb1 & 3;
807
        if (n1 == 2)
808
            break;
809
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
810
        tb2 = tb1->jmp_next[n1];
811
        tb_reset_jump(tb1, n1);
812
        tb1->jmp_next[n1] = NULL;
813
        tb1 = tb2;
814
    }
815
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
816

    
817
    tb_phys_invalidate_count++;
818
}
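/* tb_phys_invalidate() unlinks a TB from everything that can reach it: the
   physical hash table, the per-page TB lists, each CPU's tb_jmp_cache and the
   jump lists of TBs chained to it (their direct jumps are reset so they are no
   longer chained to this TB). */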
819

    
820
static inline void set_bits(uint8_t *tab, int start, int len)
821
{
822
    int end, mask, end1;
823

    
824
    end = start + len;
825
    tab += start >> 3;
826
    mask = 0xff << (start & 7);
827
    if ((start & ~7) == (end & ~7)) {
828
        if (start < end) {
829
            mask &= ~(0xff << (end & 7));
830
            *tab |= mask;
831
        }
832
    } else {
833
        *tab++ |= mask;
834
        start = (start + 8) & ~7;
835
        end1 = end & ~7;
836
        while (start < end1) {
837
            *tab++ = 0xff;
838
            start += 8;
839
        }
840
        if (start < end) {
841
            mask = ~(0xff << (end & 7));
842
            *tab |= mask;
843
        }
844
    }
845
}
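/* set_bits() sets bits [start, start + len) in a byte-granular bitmap; it is
   used by build_page_bitmap() below to record which bytes of a page are
   covered by translated code. */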
846

    
847
static void build_page_bitmap(PageDesc *p)
848
{
849
    int n, tb_start, tb_end;
850
    TranslationBlock *tb;
851

    
852
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
853

    
854
    tb = p->first_tb;
855
    while (tb != NULL) {
856
        n = (long)tb & 3;
857
        tb = (TranslationBlock *)((long)tb & ~3);
858
        /* NOTE: this is subtle as a TB may span two physical pages */
859
        if (n == 0) {
860
            /* NOTE: tb_end may be after the end of the page, but
861
               it is not a problem */
862
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
863
            tb_end = tb_start + tb->size;
864
            if (tb_end > TARGET_PAGE_SIZE)
865
                tb_end = TARGET_PAGE_SIZE;
866
        } else {
867
            tb_start = 0;
868
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
869
        }
870
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
871
        tb = tb->page_next[n];
872
    }
873
}
874

    
875
TranslationBlock *tb_gen_code(CPUState *env,
876
                              target_ulong pc, target_ulong cs_base,
877
                              int flags, int cflags)
878
{
879
    TranslationBlock *tb;
880
    uint8_t *tc_ptr;
881
    target_ulong phys_pc, phys_page2, virt_page2;
882
    int code_gen_size;
883

    
884
    phys_pc = get_phys_addr_code(env, pc);
885
    tb = tb_alloc(pc);
886
    if (!tb) {
887
        /* flush must be done */
888
        tb_flush(env);
889
        /* cannot fail at this point */
890
        tb = tb_alloc(pc);
891
        /* Don't forget to invalidate previous TB info.  */
892
        tb_invalidated_flag = 1;
893
    }
894
    tc_ptr = code_gen_ptr;
895
    tb->tc_ptr = tc_ptr;
896
    tb->cs_base = cs_base;
897
    tb->flags = flags;
898
    tb->cflags = cflags;
899
    cpu_gen_code(env, tb, &code_gen_size);
900
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
901

    
902
    /* check next page if needed */
903
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
904
    phys_page2 = -1;
905
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
906
        phys_page2 = get_phys_addr_code(env, virt_page2);
907
    }
908
    tb_link_phys(tb, phys_pc, phys_page2);
909
    return tb;
910
}
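/* tb_gen_code() flow: allocate a TB (flushing everything and retrying once if
   the buffer is full), generate host code, advance code_gen_ptr to the next
   CODE_GEN_ALIGN boundary, then link the TB to the physical page(s) it covers
   -- two pages when the guest code crosses a page boundary. */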
911

    
912
/* invalidate all TBs which intersect with the target physical page
913
   starting in range [start;end[. NOTE: start and end must refer to
914
   the same physical page. 'is_cpu_write_access' should be true if called
915
   from a real cpu write access: the virtual CPU will exit the current
916
   TB if code is modified inside this TB. */
917
void tb_invalidate_phys_page_range(a_target_phys_addr start, a_target_phys_addr end,
918
                                   int is_cpu_write_access)
919
{
920
    TranslationBlock *tb, *tb_next, *saved_tb;
921
    CPUState *env = cpu_single_env;
922
    target_ulong tb_start, tb_end;
923
    PageDesc *p;
924
    int n;
925
#ifdef TARGET_HAS_PRECISE_SMC
926
    int current_tb_not_found = is_cpu_write_access;
927
    TranslationBlock *current_tb = NULL;
928
    int current_tb_modified = 0;
929
    target_ulong current_pc = 0;
930
    target_ulong current_cs_base = 0;
931
    int current_flags = 0;
932
#endif /* TARGET_HAS_PRECISE_SMC */
933

    
934
    p = page_find(start >> TARGET_PAGE_BITS);
935
    if (!p)
936
        return;
937
    if (!p->code_bitmap &&
938
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
939
        is_cpu_write_access) {
940
        /* build code bitmap */
941
        build_page_bitmap(p);
942
    }
943

    
944
    /* we remove all the TBs in the range [start, end[ */
945
    /* XXX: see if in some cases it could be faster to invalidate all the code */
946
    tb = p->first_tb;
947
    while (tb != NULL) {
948
        n = (long)tb & 3;
949
        tb = (TranslationBlock *)((long)tb & ~3);
950
        tb_next = tb->page_next[n];
951
        /* NOTE: this is subtle as a TB may span two physical pages */
952
        if (n == 0) {
953
            /* NOTE: tb_end may be after the end of the page, but
954
               it is not a problem */
955
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
956
            tb_end = tb_start + tb->size;
957
        } else {
958
            tb_start = tb->page_addr[1];
959
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
960
        }
961
        if (!(tb_end <= start || tb_start >= end)) {
962
#ifdef TARGET_HAS_PRECISE_SMC
963
            if (current_tb_not_found) {
964
                current_tb_not_found = 0;
965
                current_tb = NULL;
966
                if (env->mem_io_pc) {
967
                    /* now we have a real cpu fault */
968
                    current_tb = tb_find_pc(env->mem_io_pc);
969
                }
970
            }
971
            if (current_tb == tb &&
972
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
973
                /* If we are modifying the current TB, we must stop
974
                its execution. We could be more precise by checking
975
                that the modification is after the current PC, but it
976
                would require a specialized function to partially
977
                restore the CPU state */
978

    
979
                current_tb_modified = 1;
980
                cpu_restore_state(current_tb, env,
981
                                  env->mem_io_pc, NULL);
982
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
983
                                     &current_flags);
984
            }
985
#endif /* TARGET_HAS_PRECISE_SMC */
986
            /* we need to do that to handle the case where a signal
987
               occurs while doing tb_phys_invalidate() */
988
            saved_tb = NULL;
989
            if (env) {
990
                saved_tb = env->current_tb;
991
                env->current_tb = NULL;
992
            }
993
            tb_phys_invalidate(tb, -1);
994
            if (env) {
995
                env->current_tb = saved_tb;
996
                if (env->interrupt_request && env->current_tb)
997
                    cpu_interrupt(env, env->interrupt_request);
998
            }
999
        }
1000
        tb = tb_next;
1001
    }
1002
#if !defined(CONFIG_USER_ONLY)
1003
    /* if no code remains, there is no need to keep using slow writes */
1004
    if (!p->first_tb) {
1005
        invalidate_page_bitmap(p);
1006
        if (is_cpu_write_access) {
1007
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
1008
        }
1009
    }
1010
#endif
1011
#ifdef TARGET_HAS_PRECISE_SMC
1012
    if (current_tb_modified) {
1013
        /* we generate a block containing just the instruction
1014
           modifying the memory. It will ensure that it cannot modify
1015
           itself */
1016
        env->current_tb = NULL;
1017
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1018
        cpu_resume_from_signal(env, NULL);
1019
    }
1020
#endif
1021
}
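/* The function above invalidates every TB overlapping [start, end[ on the
   page.  With TARGET_HAS_PRECISE_SMC it also detects that the currently
   executing TB modified itself, regenerates it as a single-instruction block
   and restarts execution via cpu_resume_from_signal(). */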
1022

    
1023
/* len must be <= 8 and start must be a multiple of len */
1024
static inline void tb_invalidate_phys_page_fast(a_target_phys_addr start, int len)
1025
{
1026
    PageDesc *p;
1027
    int offset, b;
1028
#if 0
1029
    if (1) {
1030
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1031
                  cpu_single_env->mem_io_vaddr, len,
1032
                  cpu_single_env->eip,
1033
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1034
    }
1035
#endif
1036
    p = page_find(start >> TARGET_PAGE_BITS);
1037
    if (!p)
1038
        return;
1039
    if (p->code_bitmap) {
1040
        offset = start & ~TARGET_PAGE_MASK;
1041
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1042
        if (b & ((1 << len) - 1))
1043
            goto do_invalidate;
1044
    } else {
1045
    do_invalidate:
1046
        tb_invalidate_phys_page_range(start, start + len, 1);
1047
    }
1048
}
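/* The "fast" variant above handles small writes (len <= 8): when a code
   bitmap exists it can prove the written bytes overlap no translated code and
   skip the full range invalidation. */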
1049

    
1050
#if !defined(CONFIG_SOFTMMU)
1051
static void tb_invalidate_phys_page(a_target_phys_addr addr,
1052
                                    unsigned long pc, void *puc)
1053
{
1054
    TranslationBlock *tb;
1055
    PageDesc *p;
1056
    int n;
1057
#ifdef TARGET_HAS_PRECISE_SMC
1058
    TranslationBlock *current_tb = NULL;
1059
    CPUState *env = cpu_single_env;
1060
    int current_tb_modified = 0;
1061
    target_ulong current_pc = 0;
1062
    target_ulong current_cs_base = 0;
1063
    int current_flags = 0;
1064
#endif
1065

    
1066
    addr &= TARGET_PAGE_MASK;
1067
    p = page_find(addr >> TARGET_PAGE_BITS);
1068
    if (!p)
1069
        return;
1070
    tb = p->first_tb;
1071
#ifdef TARGET_HAS_PRECISE_SMC
1072
    if (tb && pc != 0) {
1073
        current_tb = tb_find_pc(pc);
1074
    }
1075
#endif
1076
    while (tb != NULL) {
1077
        n = (long)tb & 3;
1078
        tb = (TranslationBlock *)((long)tb & ~3);
1079
#ifdef TARGET_HAS_PRECISE_SMC
1080
        if (current_tb == tb &&
1081
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1082
                /* If we are modifying the current TB, we must stop
1083
                   its execution. We could be more precise by checking
1084
                   that the modification is after the current PC, but it
1085
                   would require a specialized function to partially
1086
                   restore the CPU state */
1087

    
1088
            current_tb_modified = 1;
1089
            cpu_restore_state(current_tb, env, pc, puc);
1090
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1091
                                 &current_flags);
1092
        }
1093
#endif /* TARGET_HAS_PRECISE_SMC */
1094
        tb_phys_invalidate(tb, addr);
1095
        tb = tb->page_next[n];
1096
    }
1097
    p->first_tb = NULL;
1098
#ifdef TARGET_HAS_PRECISE_SMC
1099
    if (current_tb_modified) {
1100
        /* we generate a block containing just the instruction
1101
           modifying the memory. It will ensure that it cannot modify
1102
           itself */
1103
        env->current_tb = NULL;
1104
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1105
        cpu_resume_from_signal(env, puc);
1106
    }
1107
#endif
1108
}
1109
#endif
1110

    
1111
/* add the tb in the target page and protect it if necessary */
1112
static inline void tb_alloc_page(TranslationBlock *tb,
1113
                                 unsigned int n, target_ulong page_addr)
1114
{
1115
    PageDesc *p;
1116
    TranslationBlock *last_first_tb;
1117

    
1118
    tb->page_addr[n] = page_addr;
1119
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1120
    tb->page_next[n] = p->first_tb;
1121
    last_first_tb = p->first_tb;
1122
    p->first_tb = (TranslationBlock *)((long)tb | n);
1123
    invalidate_page_bitmap(p);
1124

    
1125
#if defined(TARGET_HAS_SMC) || 1
1126

    
1127
#if defined(CONFIG_USER_ONLY)
1128
    if (p->flags & PAGE_WRITE) {
1129
        target_ulong addr;
1130
        PageDesc *p2;
1131
        int prot;
1132

    
1133
        /* force the host page as non writable (writes will have a
1134
           page fault + mprotect overhead) */
1135
        page_addr &= qemu_host_page_mask;
1136
        prot = 0;
1137
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1138
            addr += TARGET_PAGE_SIZE) {
1139

    
1140
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1141
            if (!p2)
1142
                continue;
1143
            prot |= p2->flags;
1144
            p2->flags &= ~PAGE_WRITE;
1145
            page_get_flags(addr);
1146
          }
1147
        mprotect(g2h(page_addr), qemu_host_page_size,
1148
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1149
#ifdef DEBUG_TB_INVALIDATE
1150
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1151
               page_addr);
1152
#endif
1153
    }
1154
#else
1155
    /* if some code is already present, then the pages are already
1156
       protected. So we handle the case where only the first TB is
1157
       allocated in a physical page */
1158
    if (!last_first_tb) {
1159
        tlb_protect_code(page_addr);
1160
    }
1161
#endif
1162

    
1163
#endif /* TARGET_HAS_SMC */
1164
}
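/* tb_alloc_page() links the TB into the page's TB list and write-protects the
   page: in user mode by clearing PAGE_WRITE and mprotect()ing the host page,
   in system mode via tlb_protect_code() when this is the first TB on the
   page. */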
1165

    
1166
/* Allocate a new translation block. Flush the translation buffer if
1167
   too many translation blocks or too much generated code. */
1168
TranslationBlock *tb_alloc(target_ulong pc)
1169
{
1170
    TranslationBlock *tb;
1171

    
1172
    if (nb_tbs >= code_gen_max_blocks ||
1173
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1174
        return NULL;
1175
    tb = &tbs[nb_tbs++];
1176
    tb->pc = pc;
1177
    tb->cflags = 0;
1178
    return tb;
1179
}
1180

    
1181
void tb_free(TranslationBlock *tb)
1182
{
1183
    /* In practice this is mostly used for single-use temporary TBs.
1184
       Ignore the hard cases and just back up if this TB happens to
1185
       be the last one generated.  */
1186
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1187
        code_gen_ptr = tb->tc_ptr;
1188
        nb_tbs--;
1189
    }
1190
}
1191

    
1192
/* add a new TB and link it to the physical page tables. phys_page2 is
1193
   (-1) to indicate that only one page contains the TB. */
1194
void tb_link_phys(TranslationBlock *tb,
1195
                  target_ulong phys_pc, target_ulong phys_page2)
1196
{
1197
    unsigned int h;
1198
    TranslationBlock **ptb;
1199

    
1200
    /* Grab the mmap lock to stop another thread invalidating this TB
1201
       before we are done.  */
1202
    mmap_lock();
1203
    /* add in the physical hash table */
1204
    h = tb_phys_hash_func(phys_pc);
1205
    ptb = &tb_phys_hash[h];
1206
    tb->phys_hash_next = *ptb;
1207
    *ptb = tb;
1208

    
1209
    /* add in the page list */
1210
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1211
    if (phys_page2 != -1)
1212
        tb_alloc_page(tb, 1, phys_page2);
1213
    else
1214
        tb->page_addr[1] = -1;
1215

    
1216
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1217
    tb->jmp_next[0] = NULL;
1218
    tb->jmp_next[1] = NULL;
1219

    
1220
    /* init original jump addresses */
1221
    if (tb->tb_next_offset[0] != 0xffff)
1222
        tb_reset_jump(tb, 0);
1223
    if (tb->tb_next_offset[1] != 0xffff)
1224
        tb_reset_jump(tb, 1);
1225

    
1226
#ifdef DEBUG_TB_CHECK
1227
    tb_page_check();
1228
#endif
1229
    mmap_unlock();
1230
}
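/* tb_link_phys() is where a TB becomes visible: it is hashed by physical PC,
   attached to one or two pages via tb_alloc_page(), and its direct-jump slots
   are reset to their original (unchained) targets, all under mmap_lock so no
   other thread can invalidate it half-linked. */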
1231

    
1232
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1233
   tb[1].tc_ptr. Return NULL if not found */
1234
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1235
{
1236
    int m_min, m_max, m;
1237
    unsigned long v;
1238
    TranslationBlock *tb;
1239

    
1240
    if (nb_tbs <= 0)
1241
        return NULL;
1242
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1243
        tc_ptr >= (unsigned long)code_gen_ptr)
1244
        return NULL;
1245
    /* binary search (cf Knuth) */
1246
    m_min = 0;
1247
    m_max = nb_tbs - 1;
1248
    while (m_min <= m_max) {
1249
        m = (m_min + m_max) >> 1;
1250
        tb = &tbs[m];
1251
        v = (unsigned long)tb->tc_ptr;
1252
        if (v == tc_ptr)
1253
            return tb;
1254
        else if (tc_ptr < v) {
1255
            m_max = m - 1;
1256
        } else {
1257
            m_min = m + 1;
1258
        }
1259
    }
1260
    return &tbs[m_max];
1261
}
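/* tb_find_pc() relies on tbs[] being ordered by tc_ptr: blocks are handed out
   sequentially from code_gen_buffer, so a binary search maps a host code
   pointer back to the TranslationBlock that contains it. */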
1262

    
1263
static void tb_reset_jump_recursive(TranslationBlock *tb);
1264

    
1265
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1266
{
1267
    TranslationBlock *tb1, *tb_next, **ptb;
1268
    unsigned int n1;
1269

    
1270
    tb1 = tb->jmp_next[n];
1271
    if (tb1 != NULL) {
1272
        /* find head of list */
1273
        for(;;) {
1274
            n1 = (long)tb1 & 3;
1275
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1276
            if (n1 == 2)
1277
                break;
1278
            tb1 = tb1->jmp_next[n1];
1279
        }
1280
        /* we are now sure now that tb jumps to tb1 */
1281
        tb_next = tb1;
1282

    
1283
        /* remove tb from the jmp_first list */
1284
        ptb = &tb_next->jmp_first;
1285
        for(;;) {
1286
            tb1 = *ptb;
1287
            n1 = (long)tb1 & 3;
1288
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1289
            if (n1 == n && tb1 == tb)
1290
                break;
1291
            ptb = &tb1->jmp_next[n1];
1292
        }
1293
        *ptb = tb->jmp_next[n];
1294
        tb->jmp_next[n] = NULL;
1295

    
1296
        /* suppress the jump to next tb in generated code */
1297
        tb_reset_jump(tb, n);
1298

    
1299
        /* suppress jumps in the tb on which we could have jumped */
1300
        tb_reset_jump_recursive(tb_next);
1301
    }
1302
}
1303

    
1304
static void tb_reset_jump_recursive(TranslationBlock *tb)
1305
{
1306
    tb_reset_jump_recursive2(tb, 0);
1307
    tb_reset_jump_recursive2(tb, 1);
1308
}
1309

    
1310
#if defined(TARGET_HAS_ICE)
1311
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1312
{
1313
    a_target_phys_addr addr;
1314
    target_ulong pd;
1315
    a_ram_addr ram_addr;
1316
    PhysPageDesc *p;
1317

    
1318
    addr = cpu_get_phys_page_debug(env, pc);
1319
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1320
    if (!p) {
1321
        pd = IO_MEM_UNASSIGNED;
1322
    } else {
1323
        pd = p->phys_offset;
1324
    }
1325
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1326
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1327
}
1328
#endif
1329

    
1330
/* Add a watchpoint.  */
1331
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1332
                          int flags, CPUWatchpoint **watchpoint)
1333
{
1334
    target_ulong len_mask = ~(len - 1);
1335
    CPUWatchpoint *wp;
1336

    
1337
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1338
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1339
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1340
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1341
        return -EINVAL;
1342
    }
1343
    wp = qemu_malloc(sizeof(*wp));
1344

    
1345
    wp->vaddr = addr;
1346
    wp->len_mask = len_mask;
1347
    wp->flags = flags;
1348

    
1349
    /* keep all GDB-injected watchpoints in front */
1350
    if (flags & BP_GDB)
1351
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1352
    else
1353
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1354

    
1355
    tlb_flush_page(env, addr);
1356

    
1357
    if (watchpoint)
1358
        *watchpoint = wp;
1359
    return 0;
1360
}
1361

    
1362
/* Remove a specific watchpoint.  */
1363
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1364
                          int flags)
1365
{
1366
    target_ulong len_mask = ~(len - 1);
1367
    CPUWatchpoint *wp;
1368

    
1369
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1370
        if (addr == wp->vaddr && len_mask == wp->len_mask
1371
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1372
            cpu_watchpoint_remove_by_ref(env, wp);
1373
            return 0;
1374
        }
1375
    }
1376
    return -ENOENT;
1377
}
1378

    
1379
/* Remove a specific watchpoint by reference.  */
1380
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1381
{
1382
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1383

    
1384
    tlb_flush_page(env, watchpoint->vaddr);
1385

    
1386
    qemu_free(watchpoint);
1387
}
1388

    
1389
/* Remove all matching watchpoints.  */
1390
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1391
{
1392
    CPUWatchpoint *wp, *next;
1393

    
1394
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1395
        if (wp->flags & mask)
1396
            cpu_watchpoint_remove_by_ref(env, wp);
1397
    }
1398
}
1399

    
1400
/* Add a breakpoint.  */
1401
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1402
                          CPUBreakpoint **breakpoint)
1403
{
1404
#if defined(TARGET_HAS_ICE)
1405
    CPUBreakpoint *bp;
1406

    
1407
    bp = qemu_malloc(sizeof(*bp));
1408

    
1409
    bp->pc = pc;
1410
    bp->flags = flags;
1411

    
1412
    /* keep all GDB-injected breakpoints in front */
1413
    if (flags & BP_GDB)
1414
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1415
    else
1416
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1417

    
1418
    breakpoint_invalidate(env, pc);
1419

    
1420
    if (breakpoint)
1421
        *breakpoint = bp;
1422
    return 0;
1423
#else
1424
    return -ENOSYS;
1425
#endif
1426
}
1427

    
1428
/* Remove a specific breakpoint.  */
1429
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1430
{
1431
#if defined(TARGET_HAS_ICE)
1432
    CPUBreakpoint *bp;
1433

    
1434
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1435
        if (bp->pc == pc && bp->flags == flags) {
1436
            cpu_breakpoint_remove_by_ref(env, bp);
1437
            return 0;
1438
        }
1439
    }
1440
    return -ENOENT;
1441
#else
1442
    return -ENOSYS;
1443
#endif
1444
}
1445

    
1446
/* Remove a specific breakpoint by reference.  */
1447
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1448
{
1449
#if defined(TARGET_HAS_ICE)
1450
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1451

    
1452
    breakpoint_invalidate(env, breakpoint->pc);
1453

    
1454
    qemu_free(breakpoint);
1455
#endif
1456
}
1457

    
1458
/* Remove all matching breakpoints. */
1459
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1460
{
1461
#if defined(TARGET_HAS_ICE)
1462
    CPUBreakpoint *bp, *next;
1463

    
1464
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1465
        if (bp->flags & mask)
1466
            cpu_breakpoint_remove_by_ref(env, bp);
1467
    }
1468
#endif
1469
}
1470

    
1471
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1472
   CPU loop after each instruction */
1473
void cpu_single_step(CPUState *env, int enabled)
1474
{
1475
#if defined(TARGET_HAS_ICE)
1476
    if (env->singlestep_enabled != enabled) {
1477
        env->singlestep_enabled = enabled;
1478
        if (kvm_enabled())
1479
            kvm_update_guest_debug(env, 0);
1480
        else {
1481
            /* must flush all the translated code to avoid inconsistencies */
1482
            /* XXX: only flush what is necessary */
1483
            tb_flush(env);
1484
        }
1485
    }
1486
#endif
1487
}
1488

    
1489
/* enable or disable low levels log */
1490
void cpu_set_log(int log_flags)
1491
{
1492
    loglevel = log_flags;
1493
    if (loglevel && !logfile) {
1494
        logfile = fopen(logfilename, log_append ? "a" : "w");
1495
        if (!logfile) {
1496
            perror(logfilename);
1497
            _exit(1);
1498
        }
1499
#if !defined(CONFIG_SOFTMMU)
1500
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1501
        {
1502
            static char logfile_buf[4096];
1503
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1504
        }
1505
#elif !defined(_WIN32)
1506
        /* Win32 doesn't support line-buffering and requires size >= 2 */
1507
        setvbuf(logfile, NULL, _IOLBF, 0);
1508
#endif
1509
        log_append = 1;
1510
    }
1511
    if (!loglevel && logfile) {
1512
        fclose(logfile);
1513
        logfile = NULL;
1514
    }
1515
}
1516

    
1517
void cpu_set_log_filename(const char *filename)
1518
{
1519
    logfilename = strdup(filename);
1520
    if (logfile) {
1521
        fclose(logfile);
1522
        logfile = NULL;
1523
    }
1524
    cpu_set_log(loglevel);
1525
}
1526

    
1527
static void cpu_unlink_tb(CPUState *env)
1528
{
1529
#if defined(CONFIG_USE_NPTL)
1530
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1531
       problem and hope the cpu will stop of its own accord.  For userspace
1532
       emulation this often isn't actually as bad as it sounds.  Often
1533
       signals are used primarily to interrupt blocking syscalls.  */
1534
#else
1535
    TranslationBlock *tb;
1536
    static a_spinlock interrupt_lock = SPIN_LOCK_UNLOCKED;
1537

    
1538
    tb = env->current_tb;
1539
    /* if the cpu is currently executing code, we must unlink it and
1540
       all the potentially executing TB */
1541
    if (tb && !testandset(&interrupt_lock)) {
1542
        env->current_tb = NULL;
1543
        tb_reset_jump_recursive(tb);
1544
        resetlock(&interrupt_lock);
1545
    }
1546
#endif
1547
}
1548

    
1549
/* mask must never be zero, except for A20 change call */
1550
void cpu_interrupt(CPUState *env, int mask)
1551
{
1552
    int old_mask;
1553

    
1554
    old_mask = env->interrupt_request;
1555
    env->interrupt_request |= mask;
1556

    
1557
#ifndef CONFIG_USER_ONLY
1558
    /*
1559
     * If called from iothread context, wake the target cpu in
1560
     * case it's halted.
1561
     */
1562
    if (!qemu_cpu_self(env)) {
1563
        qemu_cpu_kick(env);
1564
        return;
1565
    }
1566
#endif
1567

    
1568
    if (use_icount) {
1569
        env->icount_decr.u16.high = 0xffff;
1570
#ifndef CONFIG_USER_ONLY
1571
        if (!can_do_io(env)
1572
            && (mask & ~old_mask) != 0) {
1573
            cpu_abort(env, "Raised interrupt while not in I/O function");
1574
        }
1575
#endif
1576
    } else {
1577
        cpu_unlink_tb(env);
1578
    }
1579
}
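/* cpu_interrupt() only records the request: with icount enabled it forces the
   instruction counter to expire so translated code returns to the main loop
   soon, otherwise cpu_unlink_tb() breaks the chaining of the currently
   executing TB. */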
1580

    
1581
void cpu_reset_interrupt(CPUState *env, int mask)
1582
{
1583
    env->interrupt_request &= ~mask;
1584
}
1585

    
1586
void cpu_exit(CPUState *env)
1587
{
1588
    env->exit_request = 1;
1589
    cpu_unlink_tb(env);
1590
}
1591

    
1592
const CPULogItem cpu_log_items[] = {
1593
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1594
      "show generated host assembly code for each compiled TB" },
1595
    { CPU_LOG_TB_IN_ASM, "in_asm",
1596
      "show target assembly code for each compiled TB" },
1597
    { CPU_LOG_TB_OP, "op",
1598
      "show micro ops for each compiled TB" },
1599
    { CPU_LOG_TB_OP_OPT, "op_opt",
1600
      "show micro ops "
1601
#ifdef TARGET_I386
1602
      "before eflags optimization and "
1603
#endif
1604
      "after liveness analysis" },
1605
    { CPU_LOG_INT, "int",
1606
      "show interrupts/exceptions in short format" },
1607
    { CPU_LOG_EXEC, "exec",
1608
      "show trace before each executed TB (lots of logs)" },
1609
    { CPU_LOG_TB_CPU, "cpu",
1610
      "show CPU state before block translation" },
1611
#ifdef TARGET_I386
1612
    { CPU_LOG_PCALL, "pcall",
1613
      "show protected mode far calls/returns/exceptions" },
1614
    { CPU_LOG_RESET, "cpu_reset",
1615
      "show CPU state before CPU resets" },
1616
#endif
1617
#ifdef DEBUG_IOPORT
1618
    { CPU_LOG_IOPORT, "ioport",
1619
      "show all i/o ports accesses" },
1620
#endif
1621
    { 0, NULL, NULL },
1622
};
1623

    
1624
static int cmp1(const char *s1, int n, const char *s2)
1625
{
1626
    if (strlen(s2) != n)
1627
        return 0;
1628
    return memcmp(s1, s2, n) == 0;
1629
}
1630

    
1631
/* takes a comma separated list of log masks. Return 0 if error. */
1632
int cpu_str_to_log_mask(const char *str)
1633
{
1634
    const CPULogItem *item;
1635
    int mask;
1636
    const char *p, *p1;
1637

    
1638
    p = str;
1639
    mask = 0;
1640
    for(;;) {
1641
        p1 = strchr(p, ',');
1642
        if (!p1)
1643
            p1 = p + strlen(p);
1644
        if(cmp1(p,p1-p,"all")) {
1645
                for(item = cpu_log_items; item->mask != 0; item++) {
1646
                        mask |= item->mask;
1647
                }
1648
        } else {
1649
        for(item = cpu_log_items; item->mask != 0; item++) {
1650
            if (cmp1(p, p1 - p, item->name))
1651
                goto found;
1652
        }
1653
        return 0;
1654
        }
1655
    found:
1656
        mask |= item->mask;
1657
        if (*p1 != ',')
1658
            break;
1659
        p = p1 + 1;
1660
    }
1661
    return mask;
1662
}
1663

    
1664
void cpu_abort(CPUState *env, const char *fmt, ...)
1665
{
1666
    va_list ap;
1667
    va_list ap2;
1668

    
1669
    va_start(ap, fmt);
1670
    va_copy(ap2, ap);
1671
    fprintf(stderr, "qemu: fatal: ");
1672
    vfprintf(stderr, fmt, ap);
1673
    fprintf(stderr, "\n");
1674
#ifdef TARGET_I386
1675
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1676
#else
1677
    cpu_dump_state(env, stderr, fprintf, 0);
1678
#endif
1679
    if (qemu_log_enabled()) {
1680
        qemu_log("qemu: fatal: ");
1681
        qemu_log_vprintf(fmt, ap2);
1682
        qemu_log("\n");
1683
#ifdef TARGET_I386
1684
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1685
#else
1686
        log_cpu_state(env, 0);
1687
#endif
1688
        qemu_log_flush();
1689
        qemu_log_close();
1690
    }
1691
    va_end(ap2);
1692
    va_end(ap);
1693
    abort();
1694
}
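
/* Example (sketch): cpu_abort() accepts printf-style arguments; target code
 * typically uses it for "should never happen" situations.  'env' and 'reg'
 * stand in for whatever the calling context provides. */
#if 0
    cpu_abort(env, "unimplemented control register write 0x%x\n", reg);
#endif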
1695

    
1696
CPUState *cpu_copy(CPUState *env)
1697
{
1698
    CPUState *new_env = cpu_init(env->cpu_model_str);
1699
    CPUState *next_cpu = new_env->next_cpu;
1700
    int cpu_index = new_env->cpu_index;
1701
#if defined(TARGET_HAS_ICE)
1702
    CPUBreakpoint *bp;
1703
    CPUWatchpoint *wp;
1704
#endif
1705

    
1706
    memcpy(new_env, env, sizeof(CPUState));
1707

    
1708
    /* Preserve chaining and index. */
1709
    new_env->next_cpu = next_cpu;
1710
    new_env->cpu_index = cpu_index;
1711

    
1712
    /* Clone all break/watchpoints.
1713
       Note: Once we support ptrace with hw-debug register access, make sure
1714
       BP_CPU break/watchpoints are handled correctly on clone. */
1715
    QTAILQ_INIT(&env->breakpoints);
1716
    QTAILQ_INIT(&env->watchpoints);
1717
#if defined(TARGET_HAS_ICE)
1718
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
1719
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1720
    }
1721
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1722
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1723
                              wp->flags, NULL);
1724
    }
1725
#endif
1726

    
1727
    return new_env;
1728
}
1729

    
1730
#if !defined(CONFIG_USER_ONLY)
1731

    
1732
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1733
{
1734
    unsigned int i;
1735

    
1736
    /* Discard jump cache entries for any tb which might potentially
1737
       overlap the flushed page.  */
1738
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1739
    memset (&env->tb_jmp_cache[i], 0, 
1740
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1741

    
1742
    i = tb_jmp_cache_hash_page(addr);
1743
    memset (&env->tb_jmp_cache[i], 0, 
1744
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1745
}
1746

    
1747
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};
1753

    
1754
/* NOTE: if flush_global is true, also flush global entries (not
1755
   implemented yet) */
1756
void tlb_flush(CPUState *env, int flush_global)
1757
{
1758
    int i;
1759

    
1760
#if defined(DEBUG_TLB)
1761
    printf("tlb_flush:\n");
1762
#endif
1763
    /* must reset current TB so that interrupts cannot modify the
1764
       links while we are modifying them */
1765
    env->current_tb = NULL;
1766

    
1767
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1768
        int mmu_idx;
1769
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1770
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
1771
        }
1772
    }
1773

    
1774
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1775

    
1776
    tlb_flush_count++;
1777
}
1778

    
1779
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1780
{
1781
    if (addr == (tlb_entry->addr_read &
1782
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1783
        addr == (tlb_entry->addr_write &
1784
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1785
        addr == (tlb_entry->addr_code &
1786
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1787
        *tlb_entry = s_cputlb_empty_entry;
1788
    }
1789
}
1790

    
1791
void tlb_flush_page(CPUState *env, target_ulong addr)
1792
{
1793
    int i;
1794
    int mmu_idx;
1795

    
1796
#if defined(DEBUG_TLB)
1797
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1798
#endif
1799
    /* must reset current TB so that interrupts cannot modify the
1800
       links while we are modifying them */
1801
    env->current_tb = NULL;
1802

    
1803
    addr &= TARGET_PAGE_MASK;
1804
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1805
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1806
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
1807

    
1808
    tlb_flush_jmp_cache(env, addr);
1809
}
1810

    
1811
/* update the TLBs so that writes to code in the virtual page 'addr'
1812
   can be detected */
1813
static void tlb_protect_code(a_ram_addr ram_addr)
1814
{
1815
    cpu_physical_memory_reset_dirty(ram_addr,
1816
                                    ram_addr + TARGET_PAGE_SIZE,
1817
                                    CODE_DIRTY_FLAG);
1818
}
1819

    
1820
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1821
   tested for self modifying code */
1822
static void tlb_unprotect_code_phys(CPUState *env, a_ram_addr ram_addr,
1823
                                    target_ulong vaddr)
1824
{
1825
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1826
}
1827

    
1828
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1829
                                         unsigned long start, unsigned long length)
1830
{
1831
    unsigned long addr;
1832
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1833
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1834
        if ((addr - start) < length) {
1835
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1836
        }
1837
    }
1838
}
1839

    
1840
/* Note: start and end must be within the same ram block.  */
1841
void cpu_physical_memory_reset_dirty(a_ram_addr start, a_ram_addr end,
1842
                                     int dirty_flags)
1843
{
1844
    CPUState *env;
1845
    unsigned long length, start1;
1846
    int i, mask, len;
1847
    uint8_t *p;
1848

    
1849
    start &= TARGET_PAGE_MASK;
1850
    end = TARGET_PAGE_ALIGN(end);
1851

    
1852
    length = end - start;
1853
    if (length == 0)
1854
        return;
1855
    len = length >> TARGET_PAGE_BITS;
1856
    mask = ~dirty_flags;
1857
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1858
    for(i = 0; i < len; i++)
1859
        p[i] &= mask;
1860

    
1861
    /* we modify the TLB cache so that the dirty bit will be set again
1862
       when accessing the range */
1863
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1864
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
1866
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1867
            != (end - 1) - start) {
1868
        abort();
1869
    }
1870

    
1871
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1872
        int mmu_idx;
1873
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1874
            for(i = 0; i < CPU_TLB_SIZE; i++)
1875
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
1876
                                      start1, length);
1877
        }
1878
    }
1879
}
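
/* Example (sketch, with a hypothetical 'ram_base' offset for an already
 * registered RAM range, and assuming the VGA_DIRTY_FLAG bit from cpu-all.h):
 * clearing the VGA dirty flag for one page, so that the display code is
 * notified again on the next guest write to it. */
#if 0
static void example_clear_vga_dirty(a_ram_addr ram_base)
{
    cpu_physical_memory_reset_dirty(ram_base,
                                    ram_base + TARGET_PAGE_SIZE,
                                    VGA_DIRTY_FLAG);
}
#endif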
1880

    
1881
int cpu_physical_memory_set_dirty_tracking(int enable)
1882
{
1883
    in_migration = enable;
1884
    if (kvm_enabled()) {
1885
        return kvm_set_migration_log(enable);
1886
    }
1887
    return 0;
1888
}
1889

    
1890
int cpu_physical_memory_get_dirty_tracking(void)
1891
{
1892
    return in_migration;
1893
}
1894

    
1895
int cpu_physical_sync_dirty_bitmap(a_target_phys_addr start_addr,
1896
                                   a_target_phys_addr end_addr)
1897
{
1898
    int ret = 0;
1899

    
1900
    if (kvm_enabled())
1901
        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1902
    return ret;
1903
}
1904

    
1905
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1906
{
1907
    a_ram_addr ram_addr;
1908
    void *p;
1909

    
1910
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1911
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1912
            + tlb_entry->addend);
1913
        ram_addr = qemu_ram_addr_from_host(p);
1914
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1915
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1916
        }
1917
    }
1918
}
1919

    
1920
/* update the TLB according to the current state of the dirty bits */
1921
void cpu_tlb_update_dirty(CPUState *env)
1922
{
1923
    int i;
1924
    int mmu_idx;
1925
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
1926
        for(i = 0; i < CPU_TLB_SIZE; i++)
1927
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
1928
    }
1929
}
1930

    
1931
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1932
{
1933
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1934
        tlb_entry->addr_write = vaddr;
1935
}
1936

    
1937
/* update the TLB corresponding to virtual page vaddr
1938
   so that it is no longer dirty */
1939
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1940
{
1941
    int i;
1942
    int mmu_idx;
1943

    
1944
    vaddr &= TARGET_PAGE_MASK;
1945
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1946
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
1947
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
1948
}
1949

    
1950
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Returns 0 if OK, or 2 if the page could not be mapped
   (can only happen in non-SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1955
                      a_target_phys_addr paddr, int prot,
1956
                      int mmu_idx, int is_softmmu)
1957
{
1958
    PhysPageDesc *p;
1959
    unsigned long pd;
1960
    unsigned int index;
1961
    target_ulong address;
1962
    target_ulong code_address;
1963
    a_target_phys_addr addend;
1964
    int ret;
1965
    CPUTLBEntry *te;
1966
    CPUWatchpoint *wp;
1967
    a_target_phys_addr iotlb;
1968

    
1969
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
1970
    if (!p) {
1971
        pd = IO_MEM_UNASSIGNED;
1972
    } else {
1973
        pd = p->phys_offset;
1974
    }
1975
#if defined(DEBUG_TLB)
1976
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
1977
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
1978
#endif
1979

    
1980
    ret = 0;
1981
    address = vaddr;
1982
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
1983
        /* IO memory case (romd handled later) */
1984
        address |= TLB_MMIO;
1985
    }
1986
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
1987
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
1988
        /* Normal RAM.  */
1989
        iotlb = pd & TARGET_PAGE_MASK;
1990
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
1991
            iotlb |= IO_MEM_NOTDIRTY;
1992
        else
1993
            iotlb |= IO_MEM_ROM;
1994
    } else {
1995
        /* IO handlers are currently passed a physical address.
1996
           It would be nice to pass an offset from the base address
1997
           of that region.  This would avoid having to special case RAM,
1998
           and avoid full address decoding in every device.
1999
           We can't use the high bits of pd for this because
2000
           IO_MEM_ROMD uses these as a ram address.  */
2001
        iotlb = (pd & ~TARGET_PAGE_MASK);
2002
        if (p) {
2003
            iotlb += p->region_offset;
2004
        } else {
2005
            iotlb += paddr;
2006
        }
2007
    }
2008

    
2009
    code_address = address;
2010
    /* Make accesses to pages with watchpoints go via the
2011
       watchpoint trap routines.  */
2012
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2013
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2014
            iotlb = io_mem_watch + paddr;
2015
            /* TODO: The memory case can be optimized by not trapping
2016
               reads of pages with a write breakpoint.  */
2017
            address |= TLB_MMIO;
2018
        }
2019
    }
2020

    
2021
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2022
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2023
    te = &env->tlb_table[mmu_idx][index];
2024
    te->addend = addend - vaddr;
2025
    if (prot & PAGE_READ) {
2026
        te->addr_read = address;
2027
    } else {
2028
        te->addr_read = -1;
2029
    }
2030

    
2031
    if (prot & PAGE_EXEC) {
2032
        te->addr_code = code_address;
2033
    } else {
2034
        te->addr_code = -1;
2035
    }
2036
    if (prot & PAGE_WRITE) {
2037
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2038
            (pd & IO_MEM_ROMD)) {
2039
            /* Write access calls the I/O callback.  */
2040
            te->addr_write = address | TLB_MMIO;
2041
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2042
                   !cpu_physical_memory_is_dirty(pd)) {
2043
            te->addr_write = address | TLB_NOTDIRTY;
2044
        } else {
2045
            te->addr_write = address;
2046
        }
2047
    } else {
2048
        te->addr_write = -1;
2049
    }
2050
    return ret;
2051
}
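
/* Example (sketch): how a guest virtual address selects an entry in the
 * direct-mapped TLB filled in above; the comparison mirrors the one used by
 * tlb_flush_entry(). */
#if 0
static int example_tlb_would_hit_read(CPUState *env, target_ulong vaddr,
                                      int mmu_idx)
{
    unsigned int index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *te = &env->tlb_table[mmu_idx][index];

    /* On a hit, the host address for the access is vaddr + te->addend. */
    return (vaddr & TARGET_PAGE_MASK) ==
           (te->addr_read & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}
#endif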
2052

    
2053
#else
2054

    
2055
void tlb_flush(CPUState *env, int flush_global)
2056
{
2057
}
2058

    
2059
void tlb_flush_page(CPUState *env, target_ulong addr)
2060
{
2061
}
2062

    
2063
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2064
                      a_target_phys_addr paddr, int prot,
2065
                      int mmu_idx, int is_softmmu)
2066
{
2067
    return 0;
2068
}
2069

    
2070
/*
2071
 * Walks guest process memory "regions" one by one
2072
 * and calls callback function 'fn' for each region.
2073
 */
2074
int walk_memory_regions(void *priv,
2075
    int (*fn)(void *, unsigned long, unsigned long, unsigned long))
2076
{
2077
    unsigned long start, end;
2078
    PageDesc *p = NULL;
2079
    int i, j, prot, prot1;
2080
    int rc = 0;
2081

    
2082
    start = end = -1;
2083
    prot = 0;
2084

    
2085
    for (i = 0; i <= L1_SIZE; i++) {
2086
        p = (i < L1_SIZE) ? l1_map[i] : NULL;
2087
        for (j = 0; j < L2_SIZE; j++) {
2088
            prot1 = (p == NULL) ? 0 : p[j].flags;
2089
            /*
2090
             * "region" is one continuous chunk of memory
2091
             * that has same protection flags set.
2092
             */
2093
            if (prot1 != prot) {
2094
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2095
                if (start != -1) {
2096
                    rc = (*fn)(priv, start, end, prot);
2097
                    /* callback can stop iteration by returning != 0 */
2098
                    if (rc != 0)
2099
                        return (rc);
2100
                }
2101
                if (prot1 != 0)
2102
                    start = end;
2103
                else
2104
                    start = -1;
2105
                prot = prot1;
2106
            }
2107
            if (p == NULL)
2108
                break;
2109
        }
2110
    }
2111
    return (rc);
2112
}
2113

    
2114
static int dump_region(void *priv, unsigned long start,
2115
    unsigned long end, unsigned long prot)
2116
{
2117
    FILE *f = (FILE *)priv;
2118

    
2119
    (void) fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2120
        start, end, end - start,
2121
        ((prot & PAGE_READ) ? 'r' : '-'),
2122
        ((prot & PAGE_WRITE) ? 'w' : '-'),
2123
        ((prot & PAGE_EXEC) ? 'x' : '-'));
2124

    
2125
    return (0);
2126
}
2127

    
2128
/* dump memory mappings */
2129
void page_dump(FILE *f)
2130
{
2131
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2132
            "start", "end", "size", "prot");
2133
    walk_memory_regions(f, dump_region);
2134
}
2135

    
2136
int page_get_flags(target_ulong address)
2137
{
2138
    PageDesc *p;
2139

    
2140
    p = page_find(address >> TARGET_PAGE_BITS);
2141
    if (!p)
2142
        return 0;
2143
    return p->flags;
2144
}
2145

    
2146
/* modify the flags of a page and invalidate the code if
2147
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2148
   depending on PAGE_WRITE */
2149
void page_set_flags(target_ulong start, target_ulong end, int flags)
2150
{
2151
    PageDesc *p;
2152
    target_ulong addr;
2153

    
2154
    /* mmap_lock should already be held.  */
2155
    start = start & TARGET_PAGE_MASK;
2156
    end = TARGET_PAGE_ALIGN(end);
2157
    if (flags & PAGE_WRITE)
2158
        flags |= PAGE_WRITE_ORG;
2159
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2160
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2161
        /* We may be called for host regions that are outside guest
2162
           address space.  */
2163
        if (!p)
2164
            return;
2165
        /* if the write protection is set, then we invalidate the code
2166
           inside */
2167
        if (!(p->flags & PAGE_WRITE) &&
2168
            (flags & PAGE_WRITE) &&
2169
            p->first_tb) {
2170
            tb_invalidate_phys_page(addr, 0, NULL);
2171
        }
2172
        p->flags = flags;
2173
    }
2174
}
2175

    
2176
int page_check_range(target_ulong start, target_ulong len, int flags)
2177
{
2178
    PageDesc *p;
2179
    target_ulong end;
2180
    target_ulong addr;
2181

    
2182
    if (start + len < start)
2183
        /* we've wrapped around */
2184
        return -1;
2185

    
2186
    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;
2188

    
2189
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2190
        p = page_find(addr >> TARGET_PAGE_BITS);
2191
        if( !p )
2192
            return -1;
2193
        if( !(p->flags & PAGE_VALID) )
2194
            return -1;
2195

    
2196
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2197
            return -1;
2198
        if (flags & PAGE_WRITE) {
2199
            if (!(p->flags & PAGE_WRITE_ORG))
2200
                return -1;
2201
            /* unprotect the page if it was put read-only because it
2202
               contains translated code */
2203
            if (!(p->flags & PAGE_WRITE)) {
2204
                if (!page_unprotect(addr, 0, NULL))
2205
                    return -1;
2206
            }
2207
            return 0;
2208
        }
2209
    }
2210
    return 0;
2211
}
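
/* Example (sketch): how a caller (e.g. syscall emulation) can validate a
 * guest buffer before touching it.  'guest_addr' and 'size' are hypothetical
 * parameters. */
#if 0
static int example_validate_guest_buffer(target_ulong guest_addr,
                                         target_ulong size)
{
    if (page_check_range(guest_addr, size, PAGE_READ | PAGE_WRITE) < 0) {
        return -1; /* would fault; the caller should report EFAULT */
    }
    return 0;
}
#endif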
2212

    
2213
/* called from signal handler: invalidate the code and unprotect the
2214
   page. Return TRUE if the fault was successfully handled. */
2215
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2216
{
2217
    unsigned int page_index, prot, pindex;
2218
    PageDesc *p, *p1;
2219
    target_ulong host_start, host_end, addr;
2220

    
2221
    /* Technically this isn't safe inside a signal handler.  However, we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be OK.  */
2224
    mmap_lock();
2225

    
2226
    host_start = address & qemu_host_page_mask;
2227
    page_index = host_start >> TARGET_PAGE_BITS;
2228
    p1 = page_find(page_index);
2229
    if (!p1) {
2230
        mmap_unlock();
2231
        return 0;
2232
    }
2233
    host_end = host_start + qemu_host_page_size;
2234
    p = p1;
2235
    prot = 0;
2236
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2237
        prot |= p->flags;
2238
        p++;
2239
    }
2240
    /* if the page was really writable, then we change its
2241
       protection back to writable */
2242
    if (prot & PAGE_WRITE_ORG) {
2243
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2244
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2245
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2246
                     (prot & PAGE_BITS) | PAGE_WRITE);
2247
            p1[pindex].flags |= PAGE_WRITE;
2248
            /* and since the content will be modified, we must invalidate
2249
               the corresponding translated code. */
2250
            tb_invalidate_phys_page(address, pc, puc);
2251
#ifdef DEBUG_TB_CHECK
2252
            tb_invalidate_check(address);
2253
#endif
2254
            mmap_unlock();
2255
            return 1;
2256
        }
2257
    }
2258
    mmap_unlock();
2259
    return 0;
2260
}
2261

    
2262
static inline void tlb_set_dirty(CPUState *env,
2263
                                 unsigned long addr, target_ulong vaddr)
2264
{
2265
}
2266
#endif /* defined(CONFIG_USER_ONLY) */
2267

    
2268
#if !defined(CONFIG_USER_ONLY)
2269

    
2270
static int subpage_register (a_subpage *mmio, uint32_t start, uint32_t end,
2271
                             a_ram_addr memory, a_ram_addr region_offset);
2272
static void *subpage_init (a_target_phys_addr base, a_ram_addr *phys,
2273
                           a_ram_addr orig_memory, a_ram_addr region_offset);
2274
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2275
                      need_subpage)                                     \
2276
    do {                                                                \
2277
        if (addr > start_addr)                                          \
2278
            start_addr2 = 0;                                            \
2279
        else {                                                          \
2280
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2281
            if (start_addr2 > 0)                                        \
2282
                need_subpage = 1;                                       \
2283
        }                                                               \
2284
                                                                        \
2285
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2286
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2287
        else {                                                          \
2288
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2289
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2290
                need_subpage = 1;                                       \
2291
        }                                                               \
2292
    } while (0)
2293

    
2294
/* register physical memory. 'size' must be a multiple of the target
2295
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2296
   io memory page.  The address used when calling the IO function is
2297
   the offset from the start of the region, plus region_offset.  Both
2298
   start_addr and region_offset are rounded down to a page boundary
2299
   before calculating this offset.  This should not be a problem unless
2300
   the low bits of start_addr and region_offset differ.  */
2301
void cpu_register_physical_memory_offset(a_target_phys_addr start_addr,
2302
                                         a_ram_addr size,
2303
                                         a_ram_addr phys_offset,
2304
                                         a_ram_addr region_offset)
2305
{
2306
    a_target_phys_addr addr, end_addr;
2307
    PhysPageDesc *p;
2308
    CPUState *env;
2309
    a_ram_addr orig_size = size;
2310
    void *subpage;
2311

    
2312
    if (kvm_enabled())
2313
        kvm_set_phys_mem(start_addr, size, phys_offset);
2314

    
2315
    if (phys_offset == IO_MEM_UNASSIGNED) {
2316
        region_offset = start_addr;
2317
    }
2318
    region_offset &= TARGET_PAGE_MASK;
2319
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2320
    end_addr = start_addr + (a_target_phys_addr)size;
2321
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2322
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2323
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2324
            a_ram_addr orig_memory = p->phys_offset;
2325
            a_target_phys_addr start_addr2, end_addr2;
2326
            int need_subpage = 0;
2327

    
2328
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2329
                          need_subpage);
2330
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2331
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2332
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2333
                                           &p->phys_offset, orig_memory,
2334
                                           p->region_offset);
2335
                } else {
2336
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2337
                                            >> IO_MEM_SHIFT];
2338
                }
2339
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2340
                                 region_offset);
2341
                p->region_offset = 0;
2342
            } else {
2343
                p->phys_offset = phys_offset;
2344
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2345
                    (phys_offset & IO_MEM_ROMD))
2346
                    phys_offset += TARGET_PAGE_SIZE;
2347
            }
2348
        } else {
2349
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2350
            p->phys_offset = phys_offset;
2351
            p->region_offset = region_offset;
2352
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2353
                (phys_offset & IO_MEM_ROMD)) {
2354
                phys_offset += TARGET_PAGE_SIZE;
2355
            } else {
2356
                a_target_phys_addr start_addr2, end_addr2;
2357
                int need_subpage = 0;
2358

    
2359
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2360
                              end_addr2, need_subpage);
2361

    
2362
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2363
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2364
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2365
                                           addr & TARGET_PAGE_MASK);
2366
                    subpage_register(subpage, start_addr2, end_addr2,
2367
                                     phys_offset, region_offset);
2368
                    p->region_offset = 0;
2369
                }
2370
            }
2371
        }
2372
        region_offset += TARGET_PAGE_SIZE;
2373
    }
2374

    
2375
    /* since each CPU stores ram addresses in its TLB cache, we must
2376
       reset the modified entries */
2377
    /* XXX: slow ! */
2378
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2379
        tlb_flush(env, 1);
2380
    }
2381
}
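
/* Example (sketch, made-up address and size): a register window smaller than
 * a target page goes through the subpage machinery set up above.  The
 * 'mmio_index' value is assumed to come from cpu_register_io_memory(). */
#if 0
static void example_register_small_mmio(int mmio_index)
{
    cpu_register_physical_memory(0x10001000, 0x100, mmio_index);
}
#endif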
2382

    
2383
/* XXX: temporary until new memory mapping API */
2384
a_ram_addr cpu_get_physical_page_desc(a_target_phys_addr addr)
2385
{
2386
    PhysPageDesc *p;
2387

    
2388
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2389
    if (!p)
2390
        return IO_MEM_UNASSIGNED;
2391
    return p->phys_offset;
2392
}
2393

    
2394
void qemu_register_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size)
2395
{
2396
    if (kvm_enabled())
2397
        kvm_coalesce_mmio_region(addr, size);
2398
}
2399

    
2400
void qemu_unregister_coalesced_mmio(a_target_phys_addr addr, a_ram_addr size)
2401
{
2402
    if (kvm_enabled())
2403
        kvm_uncoalesce_mmio_region(addr, size);
2404
}
2405

    
2406
a_ram_addr qemu_ram_alloc(a_ram_addr size)
2407
{
2408
    RAMBlock *new_block;
2409

    
2410
    size = TARGET_PAGE_ALIGN(size);
2411
    new_block = qemu_malloc(sizeof(*new_block));
2412

    
2413
    new_block->host = qemu_vmalloc(size);
2414
    new_block->offset = last_ram_offset;
2415
    new_block->length = size;
2416

    
2417
    new_block->next = ram_blocks;
2418
    ram_blocks = new_block;
2419

    
2420
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2421
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2422
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2423
           0xff, size >> TARGET_PAGE_BITS);
2424

    
2425
    last_ram_offset += size;
2426

    
2427
    if (kvm_enabled())
2428
        kvm_setup_guest_memory(new_block->host, size);
2429

    
2430
    return new_block->offset;
2431
}
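
/* Example (sketch): the usual pairing in board code - allocate guest RAM and
 * then map it at guest physical address 0.  'ram_size' is hypothetical. */
#if 0
static void example_map_main_ram(a_ram_addr ram_size)
{
    a_ram_addr ram_offset = qemu_ram_alloc(ram_size);

    cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
}
#endif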
2432

    
2433
void qemu_ram_free(a_ram_addr addr)
2434
{
2435
    /* TODO: implement this.  */
2436
}
2437

    
2438
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2439
   With the exception of the softmmu code in this file, this should
2440
   only be used for local memory (e.g. video ram) that the device owns,
2441
   and knows it isn't going to access beyond the end of the block.
2442

2443
   It should not be used for general purpose DMA.
2444
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2445
 */
2446
void *qemu_get_ram_ptr(a_ram_addr addr)
2447
{
2448
    RAMBlock *prev;
2449
    RAMBlock **prevp;
2450
    RAMBlock *block;
2451

    
2452
    prev = NULL;
2453
    prevp = &ram_blocks;
2454
    block = ram_blocks;
2455
    while (block && (block->offset > addr
2456
                     || block->offset + block->length <= addr)) {
2457
        if (prev)
2458
          prevp = &prev->next;
2459
        prev = block;
2460
        block = block->next;
2461
    }
2462
    if (!block) {
2463
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2464
        abort();
2465
    }
2466
    /* Move this entry to the start of the list.  */
    if (prev) {
2468
        prev->next = block->next;
2469
        block->next = *prevp;
2470
        *prevp = block;
2471
    }
2472
    return block->host + (addr - block->offset);
2473
}
2474

    
2475
/* Some of the softmmu routines need to translate from a host pointer
2476
   (typically a TLB entry) back to a ram offset.  */
2477
a_ram_addr qemu_ram_addr_from_host(void *ptr)
2478
{
2479
    RAMBlock *prev;
2480
    RAMBlock **prevp;
2481
    RAMBlock *block;
2482
    uint8_t *host = ptr;
2483

    
2484
    prev = NULL;
2485
    prevp = &ram_blocks;
2486
    block = ram_blocks;
2487
    while (block && (block->host > host
2488
                     || block->host + block->length <= host)) {
2489
        if (prev)
2490
          prevp = &prev->next;
2491
        prev = block;
2492
        block = block->next;
2493
    }
2494
    if (!block) {
2495
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2496
        abort();
2497
    }
2498
    return block->offset + (host - block->host);
2499
}
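
/* Example (sketch): for any offset inside a registered RAM block the two
 * helpers above are inverses of each other. */
#if 0
static void example_ram_ptr_round_trip(a_ram_addr addr)
{
    void *host = qemu_get_ram_ptr(addr);

    /* qemu_ram_addr_from_host(host) yields 'addr' again. */
    if (qemu_ram_addr_from_host(host) != addr) {
        abort(); /* would indicate corrupted RAM block bookkeeping */
    }
}
#endif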
2500

    
2501
static uint32_t unassigned_mem_readb(void *opaque, a_target_phys_addr addr)
2502
{
2503
#ifdef DEBUG_UNASSIGNED
2504
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2505
#endif
2506
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2507
    do_unassigned_access(addr, 0, 0, 0, 1);
2508
#endif
2509
    return 0;
2510
}
2511

    
2512
static uint32_t unassigned_mem_readw(void *opaque, a_target_phys_addr addr)
2513
{
2514
#ifdef DEBUG_UNASSIGNED
2515
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2516
#endif
2517
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2518
    do_unassigned_access(addr, 0, 0, 0, 2);
2519
#endif
2520
    return 0;
2521
}
2522

    
2523
static uint32_t unassigned_mem_readl(void *opaque, a_target_phys_addr addr)
2524
{
2525
#ifdef DEBUG_UNASSIGNED
2526
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2527
#endif
2528
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2529
    do_unassigned_access(addr, 0, 0, 0, 4);
2530
#endif
2531
    return 0;
2532
}
2533

    
2534
static void unassigned_mem_writeb(void *opaque, a_target_phys_addr addr, uint32_t val)
2535
{
2536
#ifdef DEBUG_UNASSIGNED
2537
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2538
#endif
2539
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2540
    do_unassigned_access(addr, 1, 0, 0, 1);
2541
#endif
2542
}
2543

    
2544
static void unassigned_mem_writew(void *opaque, a_target_phys_addr addr, uint32_t val)
2545
{
2546
#ifdef DEBUG_UNASSIGNED
2547
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2548
#endif
2549
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2550
    do_unassigned_access(addr, 1, 0, 0, 2);
2551
#endif
2552
}
2553

    
2554
static void unassigned_mem_writel(void *opaque, a_target_phys_addr addr, uint32_t val)
2555
{
2556
#ifdef DEBUG_UNASSIGNED
2557
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2558
#endif
2559
#if defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
2560
    do_unassigned_access(addr, 1, 0, 0, 4);
2561
#endif
2562
}
2563

    
2564
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
2565
    unassigned_mem_readb,
2566
    unassigned_mem_readw,
2567
    unassigned_mem_readl,
2568
};
2569

    
2570
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
2571
    unassigned_mem_writeb,
2572
    unassigned_mem_writew,
2573
    unassigned_mem_writel,
2574
};
2575

    
2576
static void notdirty_mem_writeb(void *opaque, a_target_phys_addr ram_addr,
2577
                                uint32_t val)
2578
{
2579
    int dirty_flags;
2580
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2581
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2582
#if !defined(CONFIG_USER_ONLY)
2583
        tb_invalidate_phys_page_fast(ram_addr, 1);
2584
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2585
#endif
2586
    }
2587
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2588
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2589
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2590
    /* we remove the notdirty callback only if the code has been
2591
       flushed */
2592
    if (dirty_flags == 0xff)
2593
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2594
}
2595

    
2596
static void notdirty_mem_writew(void *opaque, a_target_phys_addr ram_addr,
2597
                                uint32_t val)
2598
{
2599
    int dirty_flags;
2600
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2601
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2602
#if !defined(CONFIG_USER_ONLY)
2603
        tb_invalidate_phys_page_fast(ram_addr, 2);
2604
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2605
#endif
2606
    }
2607
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2608
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2609
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2610
    /* we remove the notdirty callback only if the code has been
2611
       flushed */
2612
    if (dirty_flags == 0xff)
2613
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2614
}
2615

    
2616
static void notdirty_mem_writel(void *opaque, a_target_phys_addr ram_addr,
2617
                                uint32_t val)
2618
{
2619
    int dirty_flags;
2620
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2621
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2622
#if !defined(CONFIG_USER_ONLY)
2623
        tb_invalidate_phys_page_fast(ram_addr, 4);
2624
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2625
#endif
2626
    }
2627
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2628
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2629
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2630
    /* we remove the notdirty callback only if the code has been
2631
       flushed */
2632
    if (dirty_flags == 0xff)
2633
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2634
}
2635

    
2636
static CPUReadMemoryFunc * const error_mem_read[3] = {
2637
    NULL, /* never used */
2638
    NULL, /* never used */
2639
    NULL, /* never used */
2640
};
2641

    
2642
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
2643
    notdirty_mem_writeb,
2644
    notdirty_mem_writew,
2645
    notdirty_mem_writel,
2646
};
2647

    
2648
/* Generate a debug exception if a watchpoint has been hit.  */
2649
static void check_watchpoint(int offset, int len_mask, int flags)
2650
{
2651
    CPUState *env = cpu_single_env;
2652
    target_ulong pc, cs_base;
2653
    TranslationBlock *tb;
2654
    target_ulong vaddr;
2655
    CPUWatchpoint *wp;
2656
    int cpu_flags;
2657

    
2658
    if (env->watchpoint_hit) {
2659
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
2662
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2663
        return;
2664
    }
2665
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2666
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
2667
        if ((vaddr == (wp->vaddr & len_mask) ||
2668
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2669
            wp->flags |= BP_WATCHPOINT_HIT;
2670
            if (!env->watchpoint_hit) {
2671
                env->watchpoint_hit = wp;
2672
                tb = tb_find_pc(env->mem_io_pc);
2673
                if (!tb) {
2674
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2675
                              "pc=%p", (void *)env->mem_io_pc);
2676
                }
2677
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2678
                tb_phys_invalidate(tb, -1);
2679
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2680
                    env->exception_index = EXCP_DEBUG;
2681
                } else {
2682
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2683
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2684
                }
2685
                cpu_resume_from_signal(env, NULL);
2686
            }
2687
        } else {
2688
            wp->flags &= ~BP_WATCHPOINT_HIT;
2689
        }
2690
    }
2691
}
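
/* Example (sketch): how the code above is reached.  A front end (e.g. the gdb
 * stub) inserts a watchpoint; tlb_set_page_exec() then marks the page TLB_MMIO
 * so that guest stores are routed through watch_mem_write() and end up in
 * check_watchpoint(). */
#if 0
static void example_set_write_watchpoint(CPUState *env, target_ulong vaddr)
{
    cpu_watchpoint_insert(env, vaddr, 4, BP_GDB | BP_MEM_WRITE, NULL);
}
#endif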
2692

    
2693
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2694
   so these check for a hit then pass through to the normal out-of-line
2695
   phys routines.  */
2696
static uint32_t watch_mem_readb(void *opaque, a_target_phys_addr addr)
2697
{
2698
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2699
    return ldub_phys(addr);
2700
}
2701

    
2702
static uint32_t watch_mem_readw(void *opaque, a_target_phys_addr addr)
2703
{
2704
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2705
    return lduw_phys(addr);
2706
}
2707

    
2708
static uint32_t watch_mem_readl(void *opaque, a_target_phys_addr addr)
2709
{
2710
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2711
    return ldl_phys(addr);
2712
}
2713

    
2714
static void watch_mem_writeb(void *opaque, a_target_phys_addr addr,
2715
                             uint32_t val)
2716
{
2717
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2718
    stb_phys(addr, val);
2719
}
2720

    
2721
static void watch_mem_writew(void *opaque, a_target_phys_addr addr,
2722
                             uint32_t val)
2723
{
2724
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2725
    stw_phys(addr, val);
2726
}
2727

    
2728
static void watch_mem_writel(void *opaque, a_target_phys_addr addr,
2729
                             uint32_t val)
2730
{
2731
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2732
    stl_phys(addr, val);
2733
}
2734

    
2735
static CPUReadMemoryFunc * const watch_mem_read[3] = {
2736
    watch_mem_readb,
2737
    watch_mem_readw,
2738
    watch_mem_readl,
2739
};
2740

    
2741
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
2742
    watch_mem_writeb,
2743
    watch_mem_writew,
2744
    watch_mem_writel,
2745
};
2746

    
2747
static inline uint32_t subpage_readlen (a_subpage *mmio, a_target_phys_addr addr,
2748
                                 unsigned int len)
2749
{
2750
    uint32_t ret;
2751
    unsigned int idx;
2752

    
2753
    idx = SUBPAGE_IDX(addr);
2754
#if defined(DEBUG_SUBPAGE)
2755
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2756
           mmio, len, addr, idx);
2757
#endif
2758
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2759
                                       addr + mmio->region_offset[idx][0][len]);
2760

    
2761
    return ret;
2762
}
2763

    
2764
static inline void subpage_writelen (a_subpage *mmio, a_target_phys_addr addr,
2765
                              uint32_t value, unsigned int len)
2766
{
2767
    unsigned int idx;
2768

    
2769
    idx = SUBPAGE_IDX(addr);
2770
#if defined(DEBUG_SUBPAGE)
2771
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2772
           mmio, len, addr, idx, value);
2773
#endif
2774
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2775
                                  addr + mmio->region_offset[idx][1][len],
2776
                                  value);
2777
}
2778

    
2779
static uint32_t subpage_readb (void *opaque, a_target_phys_addr addr)
2780
{
2781
#if defined(DEBUG_SUBPAGE)
2782
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2783
#endif
2784

    
2785
    return subpage_readlen(opaque, addr, 0);
2786
}
2787

    
2788
static void subpage_writeb (void *opaque, a_target_phys_addr addr,
2789
                            uint32_t value)
2790
{
2791
#if defined(DEBUG_SUBPAGE)
2792
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2793
#endif
2794
    subpage_writelen(opaque, addr, value, 0);
2795
}
2796

    
2797
static uint32_t subpage_readw (void *opaque, a_target_phys_addr addr)
2798
{
2799
#if defined(DEBUG_SUBPAGE)
2800
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2801
#endif
2802

    
2803
    return subpage_readlen(opaque, addr, 1);
2804
}
2805

    
2806
static void subpage_writew (void *opaque, a_target_phys_addr addr,
2807
                            uint32_t value)
2808
{
2809
#if defined(DEBUG_SUBPAGE)
2810
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2811
#endif
2812
    subpage_writelen(opaque, addr, value, 1);
2813
}
2814

    
2815
static uint32_t subpage_readl (void *opaque, a_target_phys_addr addr)
2816
{
2817
#if defined(DEBUG_SUBPAGE)
2818
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2819
#endif
2820

    
2821
    return subpage_readlen(opaque, addr, 2);
2822
}
2823

    
2824
static void subpage_writel (void *opaque,
2825
                         a_target_phys_addr addr, uint32_t value)
2826
{
2827
#if defined(DEBUG_SUBPAGE)
2828
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2829
#endif
2830
    subpage_writelen(opaque, addr, value, 2);
2831
}
2832

    
2833
static CPUReadMemoryFunc * const subpage_read[] = {
2834
    &subpage_readb,
2835
    &subpage_readw,
2836
    &subpage_readl,
2837
};
2838

    
2839
static CPUWriteMemoryFunc * const subpage_write[] = {
2840
    &subpage_writeb,
2841
    &subpage_writew,
2842
    &subpage_writel,
2843
};
2844

    
2845
static int subpage_register (a_subpage *mmio, uint32_t start, uint32_t end,
2846
                             a_ram_addr memory, a_ram_addr region_offset)
2847
{
2848
    int idx, eidx;
2849
    unsigned int i;
2850

    
2851
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2852
        return -1;
2853
    idx = SUBPAGE_IDX(start);
2854
    eidx = SUBPAGE_IDX(end);
2855
#if defined(DEBUG_SUBPAGE)
2856
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
2857
           mmio, start, end, idx, eidx, memory);
2858
#endif
2859
    memory >>= IO_MEM_SHIFT;
2860
    for (; idx <= eidx; idx++) {
2861
        for (i = 0; i < 4; i++) {
2862
            if (io_mem_read[memory][i]) {
2863
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2864
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2865
                mmio->region_offset[idx][0][i] = region_offset;
2866
            }
2867
            if (io_mem_write[memory][i]) {
2868
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2869
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2870
                mmio->region_offset[idx][1][i] = region_offset;
2871
            }
2872
        }
2873
    }
2874

    
2875
    return 0;
2876
}
2877

    
2878
static void *subpage_init (a_target_phys_addr base, a_ram_addr *phys,
2879
                           a_ram_addr orig_memory, a_ram_addr region_offset)
2880
{
2881
    a_subpage *mmio;
2882
    int subpage_memory;
2883

    
2884
    mmio = qemu_mallocz(sizeof(a_subpage));
2885

    
2886
    mmio->base = base;
2887
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio);
2888
#if defined(DEBUG_SUBPAGE)
2889
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2890
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2891
#endif
2892
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2893
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2894
                         region_offset);
2895

    
2896
    return mmio;
2897
}
2898

    
2899
static int get_free_io_mem_idx(void)
2900
{
2901
    int i;
2902

    
2903
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2904
        if (!io_mem_used[i]) {
2905
            io_mem_used[i] = 1;
2906
            return i;
2907
        }
2908

    
2909
    return -1;
2910
}
2911

    
2912
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding I/O zone is
   modified. If it is zero, a new I/O zone is allocated. The return
   value can be used with cpu_register_physical_memory(); -1 is
   returned on error. */
2919
static int cpu_register_io_memory_fixed(int io_index,
2920
                                        CPUReadMemoryFunc * const *mem_read,
2921
                                        CPUWriteMemoryFunc * const *mem_write,
2922
                                        void *opaque)
2923
{
2924
    int i, subwidth = 0;
2925

    
2926
    if (io_index <= 0) {
2927
        io_index = get_free_io_mem_idx();
2928
        if (io_index == -1)
2929
            return io_index;
2930
    } else {
2931
        io_index >>= IO_MEM_SHIFT;
2932
        if (io_index >= IO_MEM_NB_ENTRIES)
2933
            return -1;
2934
    }
2935

    
2936
    for(i = 0;i < 3; i++) {
2937
        if (!mem_read[i] || !mem_write[i])
2938
            subwidth = IO_MEM_SUBWIDTH;
2939
        io_mem_read[io_index][i] = mem_read[i];
2940
        io_mem_write[io_index][i] = mem_write[i];
2941
    }
2942
    io_mem_opaque[io_index] = opaque;
2943
    return (io_index << IO_MEM_SHIFT) | subwidth;
2944
}
2945

    
2946
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
2947
                           CPUWriteMemoryFunc * const *mem_write,
2948
                           void *opaque)
2949
{
2950
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque);
2951
}
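
/* Example (sketch, hypothetical device): registering MMIO callbacks and
 * mapping them into the physical address space.  The handler names, state
 * pointer and address are all made up. */
#if 0
static CPUReadMemoryFunc * const mydev_read[3] = {
    mydev_readb, mydev_readw, mydev_readl,
};

static CPUWriteMemoryFunc * const mydev_write[3] = {
    mydev_writeb, mydev_writew, mydev_writel,
};

static void example_register_mydev(void *mydev_state)
{
    int io = cpu_register_io_memory(mydev_read, mydev_write, mydev_state);

    cpu_register_physical_memory(0x20000000, 0x1000, io);
}
#endif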
2952

    
2953
void cpu_unregister_io_memory(int io_table_address)
2954
{
2955
    int i;
2956
    int io_index = io_table_address >> IO_MEM_SHIFT;
2957

    
2958
    for (i=0;i < 3; i++) {
2959
        io_mem_read[io_index][i] = unassigned_mem_read[i];
2960
        io_mem_write[io_index][i] = unassigned_mem_write[i];
2961
    }
2962
    io_mem_opaque[io_index] = NULL;
2963
    io_mem_used[io_index] = 0;
2964
}
2965

    
2966
static void io_mem_init(void)
2967
{
2968
    int i;
2969

    
2970
    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read, unassigned_mem_write, NULL);
2971
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read, unassigned_mem_write, NULL);
2972
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read, notdirty_mem_write, NULL);
2973
    for (i=0; i<5; i++)
2974
        io_mem_used[i] = 1;
2975

    
2976
    io_mem_watch = cpu_register_io_memory(watch_mem_read,
2977
                                          watch_mem_write, NULL);
2978
}
2979

    
2980
#endif /* !defined(CONFIG_USER_ONLY) */
2981

    
2982
/* physical memory access (slow version, mainly for debug) */
2983
#if defined(CONFIG_USER_ONLY)
2984
void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
2985
                            int len, int is_write)
2986
{
2987
    int l, flags;
2988
    target_ulong page;
2989
    void * p;
2990

    
2991
    while (len > 0) {
2992
        page = addr & TARGET_PAGE_MASK;
2993
        l = (page + TARGET_PAGE_SIZE) - addr;
2994
        if (l > len)
2995
            l = len;
2996
        flags = page_get_flags(page);
2997
        if (!(flags & PAGE_VALID))
2998
            return;
2999
        if (is_write) {
3000
            if (!(flags & PAGE_WRITE))
3001
                return;
3002
            /* XXX: this code should not depend on lock_user */
3003
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3004
                /* FIXME - should this return an error rather than just fail? */
3005
                return;
3006
            memcpy(p, buf, l);
3007
            unlock_user(p, addr, l);
3008
        } else {
3009
            if (!(flags & PAGE_READ))
3010
                return;
3011
            /* XXX: this code should not depend on lock_user */
3012
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3013
                /* FIXME - should this return an error rather than just fail? */
3014
                return;
3015
            memcpy(buf, p, l);
3016
            unlock_user(p, addr, 0);
3017
        }
3018
        len -= l;
3019
        buf += l;
3020
        addr += l;
3021
    }
3022
}
3023

    
3024
#else
3025
void cpu_physical_memory_rw(a_target_phys_addr addr, uint8_t *buf,
3026
                            int len, int is_write)
3027
{
3028
    int l, io_index;
3029
    uint8_t *ptr;
3030
    uint32_t val;
3031
    a_target_phys_addr page;
3032
    unsigned long pd;
3033
    PhysPageDesc *p;
3034

    
3035
    while (len > 0) {
3036
        page = addr & TARGET_PAGE_MASK;
3037
        l = (page + TARGET_PAGE_SIZE) - addr;
3038
        if (l > len)
3039
            l = len;
3040
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3041
        if (!p) {
3042
            pd = IO_MEM_UNASSIGNED;
3043
        } else {
3044
            pd = p->phys_offset;
3045
        }
3046

    
3047
        if (is_write) {
3048
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
3049
                a_target_phys_addr addr1 = addr;
3050
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3051
                if (p)
3052
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3053
                /* XXX: could force cpu_single_env to NULL to avoid
3054
                   potential bugs */
3055
                if (l >= 4 && ((addr1 & 3) == 0)) {
3056
                    /* 32 bit write access */
3057
                    val = ldl_p(buf);
3058
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
3059
                    l = 4;
3060
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3061
                    /* 16 bit write access */
3062
                    val = lduw_p(buf);
3063
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
3064
                    l = 2;
3065
                } else {
3066
                    /* 8 bit write access */
3067
                    val = ldub_p(buf);
3068
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
3069
                    l = 1;
3070
                }
3071
            } else {
3072
                unsigned long addr1;
3073
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3074
                /* RAM case */
3075
                ptr = qemu_get_ram_ptr(addr1);
3076
                memcpy(ptr, buf, l);
3077
                if (!cpu_physical_memory_is_dirty(addr1)) {
3078
                    /* invalidate code */
3079
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3080
                    /* set dirty bit */
3081
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
3082
                        (0xff & ~CODE_DIRTY_FLAG);
3083
                }
3084
            }
3085
        } else {
3086
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
3087
                !(pd & IO_MEM_ROMD)) {
3088
                a_target_phys_addr addr1 = addr;
3089
                /* I/O case */
3090
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
3091
                if (p)
3092
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
3093
                if (l >= 4 && ((addr1 & 3) == 0)) {
3094
                    /* 32 bit read access */
3095
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
3096
                    stl_p(buf, val);
3097
                    l = 4;
3098
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
3099
                    /* 16 bit read access */
3100
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
3101
                    stw_p(buf, val);
3102
                    l = 2;
3103
                } else {
3104
                    /* 8 bit read access */
3105
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
3106
                    stb_p(buf, val);
3107
                    l = 1;
3108
                }
3109
            } else {
3110
                /* RAM case */
3111
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
3112
                    (addr & ~TARGET_PAGE_MASK);
3113
                memcpy(buf, ptr, l);
3114
            }
3115
        }
3116
        len -= l;
3117
        buf += l;
3118
        addr += l;
3119
    }
3120
}
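
/* Example (sketch): a device model copying a guest physical buffer into local
 * storage, DMA style.  'paddr', 'buf' and 'len' are hypothetical. */
#if 0
static void example_dma_read(a_target_phys_addr paddr, uint8_t *buf, int len)
{
    cpu_physical_memory_rw(paddr, buf, len, 0 /* is_write */);
}
#endif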
3121

    
3122
/* used for ROM loading : can write in RAM and ROM */
3123
void cpu_physical_memory_write_rom(a_target_phys_addr addr,
3124
                                   const uint8_t *buf, int len)
3125
{
3126
    int l;
3127
    uint8_t *ptr;
3128
    a_target_phys_addr page;
3129
    unsigned long pd;
3130
    PhysPageDesc *p;
3131

    
3132
    while (len > 0) {
3133
        page = addr & TARGET_PAGE_MASK;
3134
        l = (page + TARGET_PAGE_SIZE) - addr;
3135
        if (l > len)
3136
            l = len;
3137
        p = phys_page_find(page >> TARGET_PAGE_BITS);
3138
        if (!p) {
3139
            pd = IO_MEM_UNASSIGNED;
3140
        } else {
3141
            pd = p->phys_offset;
3142
        }
3143

    
3144
        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
3145
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
3146
            !(pd & IO_MEM_ROMD)) {
3147
            /* do nothing */
3148
        } else {
3149
            unsigned long addr1;
3150
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
3151
            /* ROM/RAM case */
3152
            ptr = qemu_get_ram_ptr(addr1);
3153
            memcpy(ptr, buf, l);
3154
        }
3155
        len -= l;
3156
        buf += l;
3157
        addr += l;
3158
    }
3159
}
3160

    
3161
typedef struct {
    void *buffer;
    a_target_phys_addr addr;
    a_target_phys_addr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    qemu_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(a_target_phys_addr addr,
                              a_target_phys_addr *plen,
                              int is_write)
{
    a_target_phys_addr len = *plen;
    a_target_phys_addr done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    a_target_phys_addr page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, a_target_phys_addr len,
                               int is_write, a_target_phys_addr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            a_ram_addr addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

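/* Illustrative sketch (not part of the build): the map/unmap pattern a
   DMA-capable device model would follow.  example_dma_write() and its retry
   callback are hypothetical; only the cpu_physical_memory_* calls and
   cpu_register_map_client() above are real. */
#if 0
static void example_dma_retry(void *opaque);

static void example_dma_write(a_target_phys_addr addr, a_target_phys_addr size,
                              const uint8_t *data)
{
    a_target_phys_addr plen = size;
    uint8_t *host;

    /* is_write = 1: we intend to fill the mapping, not read from it */
    host = cpu_physical_memory_map(addr, &plen, 1);
    if (!host) {
        /* bounce buffer busy: ask to be called back when a retry may succeed */
        cpu_register_map_client(NULL /* opaque */, example_dma_retry);
        return;
    }

    /* plen may be smaller than size; only the mapped prefix is usable */
    memcpy(host, data, plen);

    /* access_len tells unmap how much to mark dirty / copy back */
    cpu_physical_memory_unmap(host, plen, 1, plen);
}

static void example_dma_retry(void *opaque)
{
    /* re-issue the transfer here; cpu_notify_map_clients() unregisters and
       frees the client after this callback returns */
}
#endif
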
/* warning: addr must be aligned */
uint32_t ldl_phys(a_target_phys_addr addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(a_target_phys_addr addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(a_target_phys_addr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(a_target_phys_addr addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

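/* Illustrative sketch (not part of the build): a device model walking a
   guest-physical descriptor can use the ld*_phys accessors directly instead
   of going through cpu_physical_memory_rw().  The field layout and helper
   name are hypothetical. */
#if 0
static void example_read_descriptor(a_target_phys_addr desc,
                                    uint64_t *buf_addr, uint32_t *buf_len)
{
    /* both accessors require naturally aligned guest-physical addresses */
    *buf_addr = ldq_phys(desc);      /* hypothetical 64-bit "address" field */
    *buf_len  = ldl_phys(desc + 8);  /* hypothetical 32-bit "length" field */
}
#endif
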
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(a_target_phys_addr addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

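/* Illustrative sketch (not part of the build): how a target MMU helper might
   use stl_phys_notdirty() to update the accessed/dirty flags of a guest page
   table entry without flagging the page as modified.  The PTE flag values and
   the helper name are hypothetical. */
#if 0
#define EXAMPLE_PTE_ACCESSED 0x20
#define EXAMPLE_PTE_DIRTY    0x40

static void example_update_pte_flags(a_target_phys_addr pte_addr, int is_write)
{
    uint32_t pte = ldl_phys(pte_addr);

    pte |= EXAMPLE_PTE_ACCESSED;
    if (is_write)
        pte |= EXAMPLE_PTE_DIRTY;

    /* plain stl_phys() would mark the page dirty and invalidate any TBs
       derived from it; the _notdirty variant skips that bookkeeping */
    stl_phys_notdirty(pte_addr, pte);
}
#endif
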
void stq_phys_notdirty(a_target_phys_addr addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(a_target_phys_addr addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(a_target_phys_addr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(a_target_phys_addr addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(a_target_phys_addr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    a_target_phys_addr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

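/* Illustrative sketch (not part of the build): how a debugger front end
   (e.g. a gdb stub) might read a guest virtual address through the debug
   path, which resolves the address via cpu_get_phys_page_debug() instead of
   the softmmu TLB.  The helper name is hypothetical. */
#if 0
static int example_peek_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];

    /* is_write = 0: read; returns -1 if the page is not mapped */
    if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0)
        return -1;

    *out = ldl_p(buf);  /* decode the bytes in guest byte order */
    return 0;
}
#endif
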
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

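/* Illustrative sketch (not part of the build): dump_exec_info() takes any
   fprintf-compatible callback, so a quick debugging dump to stderr can simply
   pass the C library's fprintf.  The helper name is hypothetical. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif
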
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif