1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#include <windows.h>
23
#else
24
#include <sys/types.h>
25
#include <sys/mman.h>
26
#endif
27
#include <stdlib.h>
28
#include <stdio.h>
29
#include <stdarg.h>
30
#include <string.h>
31
#include <errno.h>
32
#include <unistd.h>
33
#include <inttypes.h>
34

    
35
#include "cpu.h"
36
#include "exec-all.h"
37
#include "qemu-common.h"
38
#include "tcg.h"
39
#include "hw/hw.h"
40
#include "osdep.h"
41
#include "kvm.h"
42
#if defined(CONFIG_USER_ONLY)
43
#include <qemu.h>
44
#endif
45

    
46
//#define DEBUG_TB_INVALIDATE
47
//#define DEBUG_FLUSH
48
//#define DEBUG_TLB
49
//#define DEBUG_UNASSIGNED
50

    
51
/* make various TB consistency checks */
52
//#define DEBUG_TB_CHECK
53
//#define DEBUG_TLB_CHECK
54

    
55
//#define DEBUG_IOPORT
56
//#define DEBUG_SUBPAGE
57

    
58
#if !defined(CONFIG_USER_ONLY)
59
/* TB consistency checks only implemented for usermode emulation.  */
60
#undef DEBUG_TB_CHECK
61
#endif
62

    
63
#define SMC_BITMAP_USE_THRESHOLD 10
64

    
65
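/* Per-target width of the simulated physical address space.  This bounds
   the size of the physical page descriptor table (l1_phys_map) set up
   below; targets not listed here fall back to 32 bits. */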
#if defined(TARGET_SPARC64)
66
#define TARGET_PHYS_ADDR_SPACE_BITS 41
67
#elif defined(TARGET_SPARC)
68
#define TARGET_PHYS_ADDR_SPACE_BITS 36
69
#elif defined(TARGET_ALPHA)
70
#define TARGET_PHYS_ADDR_SPACE_BITS 42
71
#define TARGET_VIRT_ADDR_SPACE_BITS 42
72
#elif defined(TARGET_PPC64)
73
#define TARGET_PHYS_ADDR_SPACE_BITS 42
74
#elif defined(TARGET_X86_64) && !defined(CONFIG_KQEMU)
75
#define TARGET_PHYS_ADDR_SPACE_BITS 42
76
#elif defined(TARGET_I386) && !defined(CONFIG_KQEMU)
77
#define TARGET_PHYS_ADDR_SPACE_BITS 36
78
#else
79
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
80
#define TARGET_PHYS_ADDR_SPACE_BITS 32
81
#endif
82

    
83
static TranslationBlock *tbs;
84
int code_gen_max_blocks;
85
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
86
static int nb_tbs;
87
/* any access to the tbs or the page table must use this lock */
88
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
89

    
90
#if defined(__arm__) || defined(__sparc_v9__)
91
/* The prologue must be reachable with a direct jump. ARM and Sparc64
92
 have limited branch ranges (possibly also PPC) so place it in a
93
 section close to the code segment. */
94
#define code_gen_section                                \
95
    __attribute__((__section__(".gen_code")))           \
96
    __attribute__((aligned (32)))
97
#else
98
#define code_gen_section                                \
99
    __attribute__((aligned (32)))
100
#endif
101

    
102
uint8_t code_gen_prologue[1024] code_gen_section;
103
static uint8_t *code_gen_buffer;
104
static unsigned long code_gen_buffer_size;
105
/* threshold to flush the translated code buffer */
106
static unsigned long code_gen_buffer_max_size;
107
uint8_t *code_gen_ptr;
108

    
109
#if !defined(CONFIG_USER_ONLY)
110
int phys_ram_fd;
111
uint8_t *phys_ram_dirty;
112
static int in_migration;
113

    
114
typedef struct RAMBlock {
115
    uint8_t *host;
116
    ram_addr_t offset;
117
    ram_addr_t length;
118
    struct RAMBlock *next;
119
} RAMBlock;
120

    
121
static RAMBlock *ram_blocks;
122
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
123
   then we can no longer assume contiguous ram offsets, and external uses
124
   of this variable will break.  */
125
ram_addr_t last_ram_offset;
126
#endif
127

    
128
CPUState *first_cpu;
129
/* current CPU in the current thread. It is only valid inside
130
   cpu_exec() */
131
CPUState *cpu_single_env;
132
/* 0 = Do not count executed instructions.
133
   1 = Precise instruction counting.
134
   2 = Adaptive rate instruction counting.  */
135
int use_icount = 0;
136
/* Current instruction counter.  While executing translated code this may
137
   include some instructions that have not yet been executed.  */
138
int64_t qemu_icount;
139

    
140
typedef struct PageDesc {
141
    /* list of TBs intersecting this ram page */
142
    TranslationBlock *first_tb;
143
    /* in order to optimize self-modifying code, we count the number
       of write accesses to a given page; past a threshold we use a bitmap */
145
    unsigned int code_write_count;
146
    uint8_t *code_bitmap;
147
#if defined(CONFIG_USER_ONLY)
148
    unsigned long flags;
149
#endif
150
} PageDesc;
151

    
152
typedef struct PhysPageDesc {
153
    /* offset in host memory of the page + io_index in the low bits */
154
    ram_addr_t phys_offset;
155
    ram_addr_t region_offset;
156
} PhysPageDesc;
157

    
158
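/* Two-level lookup for the per-page descriptors: a guest address is split
   into an L1 index, an L2 index and the offset within the page.
   Illustration, assuming a 4 KB target page (TARGET_PAGE_BITS == 12):
   L1_BITS becomes 32 - 10 - 12 = 10, so l1_map holds 1024 pointers, each
   to an array of 1024 PageDesc entries covering 4 MB of address space. */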
#define L2_BITS 10
159
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
160
/* XXX: this is a temporary hack for alpha target.
161
 *      In the future, this is to be replaced by a multi-level table
162
 *      to actually be able to handle the complete 64 bits address space.
163
 */
164
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
165
#else
166
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
167
#endif
168

    
169
#define L1_SIZE (1 << L1_BITS)
170
#define L2_SIZE (1 << L2_BITS)
171

    
172
unsigned long qemu_real_host_page_size;
173
unsigned long qemu_host_page_bits;
174
unsigned long qemu_host_page_size;
175
unsigned long qemu_host_page_mask;
176

    
177
/* XXX: for system emulation, it could just be an array */
178
static PageDesc *l1_map[L1_SIZE];
179
static PhysPageDesc **l1_phys_map;
180

    
181
#if !defined(CONFIG_USER_ONLY)
182
static void io_mem_init(void);
183

    
184
/* io memory support */
185
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
186
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
187
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
188
static char io_mem_used[IO_MEM_NB_ENTRIES];
189
static int io_mem_watch;
190
#endif
191

    
192
/* log support */
193
static const char *logfilename = "/tmp/qemu.log";
194
FILE *logfile;
195
int loglevel;
196
static int log_append = 0;
197

    
198
/* statistics */
199
static int tlb_flush_count;
200
static int tb_flush_count;
201
static int tb_phys_invalidate_count;
202

    
203
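/* Sub-page support: when a target page is shared by several memory
   regions (e.g. small MMIO areas), a subpage_t records, for every byte
   offset inside the page, which read/write handlers and which opaque
   pointer apply.  SUBPAGE_IDX() extracts that offset. */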
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
204
typedef struct subpage_t {
205
    target_phys_addr_t base;
206
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
207
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
208
    void *opaque[TARGET_PAGE_SIZE][2][4];
209
    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
210
} subpage_t;
211

    
212
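/* map_exec(): make a range of host memory executable so generated code
   can run from it.  Uses VirtualProtect() on Windows and mprotect()
   elsewhere, widening the range to whole host pages first. */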
#ifdef _WIN32
213
static void map_exec(void *addr, long size)
214
{
215
    DWORD old_protect;
216
    VirtualProtect(addr, size,
217
                   PAGE_EXECUTE_READWRITE, &old_protect);
218
    
219
}
220
#else
221
static void map_exec(void *addr, long size)
222
{
223
    unsigned long start, end, page_size;
224
    
225
    page_size = getpagesize();
226
    start = (unsigned long)addr;
227
    start &= ~(page_size - 1);
228
    
229
    end = (unsigned long)addr + size;
230
    end += page_size - 1;
231
    end &= ~(page_size - 1);
232
    
233
    mprotect((void *)start, end - start,
234
             PROT_READ | PROT_WRITE | PROT_EXEC);
235
}
236
#endif
237

    
238
static void page_init(void)
239
{
240
    /* NOTE: we can always suppose that qemu_host_page_size >=
241
       TARGET_PAGE_SIZE */
242
#ifdef _WIN32
243
    {
244
        SYSTEM_INFO system_info;
245

    
246
        GetSystemInfo(&system_info);
247
        qemu_real_host_page_size = system_info.dwPageSize;
248
    }
249
#else
250
    qemu_real_host_page_size = getpagesize();
251
#endif
252
    if (qemu_host_page_size == 0)
253
        qemu_host_page_size = qemu_real_host_page_size;
254
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
255
        qemu_host_page_size = TARGET_PAGE_SIZE;
256
    qemu_host_page_bits = 0;
257
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
258
        qemu_host_page_bits++;
259
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
260
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
261
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
262

    
263
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
264
    {
265
        long long startaddr, endaddr;
266
        FILE *f;
267
        int n;
268

    
269
        mmap_lock();
270
        last_brk = (unsigned long)sbrk(0);
271
        f = fopen("/proc/self/maps", "r");
272
        if (f) {
273
            do {
274
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
275
                if (n == 2) {
276
                    startaddr = MIN(startaddr,
277
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
278
                    endaddr = MIN(endaddr,
279
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
280
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
281
                                   TARGET_PAGE_ALIGN(endaddr),
282
                                   PAGE_RESERVED); 
283
                }
284
            } while (!feof(f));
285
            fclose(f);
286
        }
287
        mmap_unlock();
288
    }
289
#endif
290
}
291

    
292
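/* Guest page descriptor lookup.  'index' is a guest page number
   (address >> TARGET_PAGE_BITS); page_find_alloc() creates the missing
   L2 array on demand, page_find() only reads.  In user mode the
   allocation goes through mmap() directly because qemu_malloc() could
   recurse back into these tables. */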
static inline PageDesc **page_l1_map(target_ulong index)
293
{
294
#if TARGET_LONG_BITS > 32
295
    /* Host memory outside guest VM.  For 32-bit targets we have already
296
       excluded high addresses.  */
297
    if (index > ((target_ulong)L2_SIZE * L1_SIZE))
298
        return NULL;
299
#endif
300
    return &l1_map[index >> L2_BITS];
301
}
302

    
303
static inline PageDesc *page_find_alloc(target_ulong index)
304
{
305
    PageDesc **lp, *p;
306
    lp = page_l1_map(index);
307
    if (!lp)
308
        return NULL;
309

    
310
    p = *lp;
311
    if (!p) {
312
        /* allocate if not found */
313
#if defined(CONFIG_USER_ONLY)
314
        size_t len = sizeof(PageDesc) * L2_SIZE;
315
        /* Don't use qemu_malloc because it may recurse.  */
316
        p = mmap(0, len, PROT_READ | PROT_WRITE,
317
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
318
        *lp = p;
319
        if (h2g_valid(p)) {
320
            unsigned long addr = h2g(p);
321
            page_set_flags(addr & TARGET_PAGE_MASK,
322
                           TARGET_PAGE_ALIGN(addr + len),
323
                           PAGE_RESERVED); 
324
        }
325
#else
326
        p = qemu_mallocz(sizeof(PageDesc) * L2_SIZE);
327
        *lp = p;
328
#endif
329
    }
330
    return p + (index & (L2_SIZE - 1));
331
}
332

    
333
static inline PageDesc *page_find(target_ulong index)
334
{
335
    PageDesc **lp, *p;
336
    lp = page_l1_map(index);
337
    if (!lp)
338
        return NULL;
339

    
340
    p = *lp;
341
    if (!p)
342
        return 0;
343
    return p + (index & (L2_SIZE - 1));
344
}
345

    
346
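/* Physical page descriptor lookup, keyed by physical page number.  The
   table has one or two levels depending on TARGET_PHYS_ADDR_SPACE_BITS;
   freshly allocated leaves are marked IO_MEM_UNASSIGNED with their
   region_offset preset to the page's own address. */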
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
347
{
348
    void **lp, **p;
349
    PhysPageDesc *pd;
350

    
351
    p = (void **)l1_phys_map;
352
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
353

    
354
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
355
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
356
#endif
357
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
358
    p = *lp;
359
    if (!p) {
360
        /* allocate if not found */
361
        if (!alloc)
362
            return NULL;
363
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
364
        memset(p, 0, sizeof(void *) * L1_SIZE);
365
        *lp = p;
366
    }
367
#endif
368
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
369
    pd = *lp;
370
    if (!pd) {
371
        int i;
372
        /* allocate if not found */
373
        if (!alloc)
374
            return NULL;
375
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
376
        *lp = pd;
377
        for (i = 0; i < L2_SIZE; i++) {
378
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
379
          pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
380
        }
381
    }
382
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
383
}
384

    
385
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
386
{
387
    return phys_page_find_alloc(index, 0);
388
}
389

    
390
#if !defined(CONFIG_USER_ONLY)
391
static void tlb_protect_code(ram_addr_t ram_addr);
392
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
393
                                    target_ulong vaddr);
394
#define mmap_lock() do { } while(0)
395
#define mmap_unlock() do { } while(0)
396
#endif
397

    
398
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
399

    
400
#if defined(CONFIG_USER_ONLY)
401
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc is used */
403
#define USE_STATIC_CODE_GEN_BUFFER
404
#endif
405

    
406
#ifdef USE_STATIC_CODE_GEN_BUFFER
407
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
408
#endif
409

    
410
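/* Allocate the buffer that holds translated host code.  The placement
   constraints below exist so that generated code can reach the prologue
   and other blocks with direct branches: MAP_32BIT on x86-64 Linux,
   fixed low addresses on sparc64 and ARM hosts. */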
static void code_gen_alloc(unsigned long tb_size)
411
{
412
#ifdef USE_STATIC_CODE_GEN_BUFFER
413
    code_gen_buffer = static_code_gen_buffer;
414
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
415
    map_exec(code_gen_buffer, code_gen_buffer_size);
416
#else
417
    code_gen_buffer_size = tb_size;
418
    if (code_gen_buffer_size == 0) {
419
#if defined(CONFIG_USER_ONLY)
420
        /* in user mode, phys_ram_size is not meaningful */
421
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
422
#else
423
        /* XXX: needs adjustments */
424
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
425
#endif
426
    }
427
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
428
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
429
    /* The code gen buffer location may have constraints depending on
430
       the host cpu and OS */
431
#if defined(__linux__) 
432
    {
433
        int flags;
434
        void *start = NULL;
435

    
436
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
437
#if defined(__x86_64__)
438
        flags |= MAP_32BIT;
439
        /* Cannot map more than that */
440
        if (code_gen_buffer_size > (800 * 1024 * 1024))
441
            code_gen_buffer_size = (800 * 1024 * 1024);
442
#elif defined(__sparc_v9__)
443
        // Map the buffer below 2G, so we can use direct calls and branches
444
        flags |= MAP_FIXED;
445
        start = (void *) 0x60000000UL;
446
        if (code_gen_buffer_size > (512 * 1024 * 1024))
447
            code_gen_buffer_size = (512 * 1024 * 1024);
448
#elif defined(__arm__)
449
        /* Map the buffer below 32M, so we can use direct calls and branches */
450
        flags |= MAP_FIXED;
451
        start = (void *) 0x01000000UL;
452
        if (code_gen_buffer_size > 16 * 1024 * 1024)
453
            code_gen_buffer_size = 16 * 1024 * 1024;
454
#endif
455
        code_gen_buffer = mmap(start, code_gen_buffer_size,
456
                               PROT_WRITE | PROT_READ | PROT_EXEC,
457
                               flags, -1, 0);
458
        if (code_gen_buffer == MAP_FAILED) {
459
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
460
            exit(1);
461
        }
462
    }
463
#elif defined(__FreeBSD__) || defined(__DragonFly__)
464
    {
465
        int flags;
466
        void *addr = NULL;
467
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
468
#if defined(__x86_64__)
469
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
470
         * 0x40000000 is free */
471
        flags |= MAP_FIXED;
472
        addr = (void *)0x40000000;
473
        /* Cannot map more than that */
474
        if (code_gen_buffer_size > (800 * 1024 * 1024))
475
            code_gen_buffer_size = (800 * 1024 * 1024);
476
#endif
477
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
478
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
479
                               flags, -1, 0);
480
        if (code_gen_buffer == MAP_FAILED) {
481
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
482
            exit(1);
483
        }
484
    }
485
#else
486
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
487
    map_exec(code_gen_buffer, code_gen_buffer_size);
488
#endif
489
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
490
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
491
    code_gen_buffer_max_size = code_gen_buffer_size - 
492
        code_gen_max_block_size();
493
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
494
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
495
}
496

    
497
/* Must be called before using the QEMU cpus. 'tb_size' is the size
498
   (in bytes) allocated to the translation buffer. Zero means default
499
   size. */
500
void cpu_exec_init_all(unsigned long tb_size)
501
{
502
    cpu_gen_init();
503
    code_gen_alloc(tb_size);
504
    code_gen_ptr = code_gen_buffer;
505
    page_init();
506
#if !defined(CONFIG_USER_ONLY)
507
    io_mem_init();
508
#endif
509
}
510

    
511
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
512

    
513
#define CPU_COMMON_SAVE_VERSION 1
514

    
515
static void cpu_common_save(QEMUFile *f, void *opaque)
516
{
517
    CPUState *env = opaque;
518

    
519
    qemu_put_be32s(f, &env->halted);
520
    qemu_put_be32s(f, &env->interrupt_request);
521
}
522

    
523
static int cpu_common_load(QEMUFile *f, void *opaque, int version_id)
524
{
525
    CPUState *env = opaque;
526

    
527
    if (version_id != CPU_COMMON_SAVE_VERSION)
528
        return -EINVAL;
529

    
530
    qemu_get_be32s(f, &env->halted);
531
    qemu_get_be32s(f, &env->interrupt_request);
532
    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
533
       version_id is increased. */
534
    env->interrupt_request &= ~0x01;
535
    tlb_flush(env, 1);
536

    
537
    return 0;
538
}
539
#endif
540

    
541
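/* Register a new virtual CPU: append it to the global first_cpu list,
   assign cpu_index from its position in that list and, for system
   emulation, hook up the savevm handlers for its state. */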
void cpu_exec_init(CPUState *env)
542
{
543
    CPUState **penv;
544
    int cpu_index;
545

    
546
#if defined(CONFIG_USER_ONLY)
547
    cpu_list_lock();
548
#endif
549
    env->next_cpu = NULL;
550
    penv = &first_cpu;
551
    cpu_index = 0;
552
    while (*penv != NULL) {
553
        penv = (CPUState **)&(*penv)->next_cpu;
554
        cpu_index++;
555
    }
556
    env->cpu_index = cpu_index;
557
    TAILQ_INIT(&env->breakpoints);
558
    TAILQ_INIT(&env->watchpoints);
559
    *penv = env;
560
#if defined(CONFIG_USER_ONLY)
561
    cpu_list_unlock();
562
#endif
563
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
564
    register_savevm("cpu_common", cpu_index, CPU_COMMON_SAVE_VERSION,
565
                    cpu_common_save, cpu_common_load, env);
566
    register_savevm("cpu", cpu_index, CPU_SAVE_VERSION,
567
                    cpu_save, cpu_load, env);
568
#endif
569
}
570

    
571
static inline void invalidate_page_bitmap(PageDesc *p)
572
{
573
    if (p->code_bitmap) {
574
        qemu_free(p->code_bitmap);
575
        p->code_bitmap = NULL;
576
    }
577
    p->code_write_count = 0;
578
}
579

    
580
/* set to NULL all the 'first_tb' fields in all PageDescs */
581
static void page_flush_tb(void)
582
{
583
    int i, j;
584
    PageDesc *p;
585

    
586
    for(i = 0; i < L1_SIZE; i++) {
587
        p = l1_map[i];
588
        if (p) {
589
            for(j = 0; j < L2_SIZE; j++) {
590
                p->first_tb = NULL;
591
                invalidate_page_bitmap(p);
592
                p++;
593
            }
594
        }
595
    }
596
}
597

    
598
/* flush all the translation blocks */
599
/* XXX: tb_flush is currently not thread safe */
600
void tb_flush(CPUState *env1)
601
{
602
    CPUState *env;
603
#if defined(DEBUG_FLUSH)
604
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
605
           (unsigned long)(code_gen_ptr - code_gen_buffer),
606
           nb_tbs, nb_tbs > 0 ?
607
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
608
#endif
609
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
610
        cpu_abort(env1, "Internal error: code buffer overflow\n");
611

    
612
    nb_tbs = 0;
613

    
614
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
615
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
616
    }
617

    
618
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
619
    page_flush_tb();
620

    
621
    code_gen_ptr = code_gen_buffer;
622
    /* XXX: flush processor icache at this point if cache flush is
623
       expensive */
624
    tb_flush_count++;
625
}
626

    
627
#ifdef DEBUG_TB_CHECK
628

    
629
static void tb_invalidate_check(target_ulong address)
630
{
631
    TranslationBlock *tb;
632
    int i;
633
    address &= TARGET_PAGE_MASK;
634
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
635
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
636
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
637
                  address >= tb->pc + tb->size)) {
638
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
639
                       address, (long)tb->pc, tb->size);
640
            }
641
        }
642
    }
643
}
644

    
645
/* verify that all the pages have correct rights for code */
646
static void tb_page_check(void)
647
{
648
    TranslationBlock *tb;
649
    int i, flags1, flags2;
650

    
651
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
652
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
653
            flags1 = page_get_flags(tb->pc);
654
            flags2 = page_get_flags(tb->pc + tb->size - 1);
655
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
656
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
657
                       (long)tb->pc, tb->size, flags1, flags2);
658
            }
659
        }
660
    }
661
}
662

    
663
static void tb_jmp_check(TranslationBlock *tb)
664
{
665
    TranslationBlock *tb1;
666
    unsigned int n1;
667

    
668
    /* suppress any remaining jumps to this TB */
669
    tb1 = tb->jmp_first;
670
    for(;;) {
671
        n1 = (long)tb1 & 3;
672
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
673
        if (n1 == 2)
674
            break;
675
        tb1 = tb1->jmp_next[n1];
676
    }
677
    /* check end of list */
678
    if (tb1 != tb) {
679
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
680
    }
681
}
682

    
683
#endif
684

    
685
/* invalidate one TB */
686
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
687
                             int next_offset)
688
{
689
    TranslationBlock *tb1;
690
    for(;;) {
691
        tb1 = *ptb;
692
        if (tb1 == tb) {
693
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
694
            break;
695
        }
696
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
697
    }
698
}
699

    
700
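/* The TB lists below use tagged pointers: the low 2 bits of each link
   encode which slot (0 or 1) of the pointed-to TB continues the list,
   and the value 2 marks the end of a jump list. */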
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
701
{
702
    TranslationBlock *tb1;
703
    unsigned int n1;
704

    
705
    for(;;) {
706
        tb1 = *ptb;
707
        n1 = (long)tb1 & 3;
708
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
709
        if (tb1 == tb) {
710
            *ptb = tb1->page_next[n1];
711
            break;
712
        }
713
        ptb = &tb1->page_next[n1];
714
    }
715
}
716

    
717
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
718
{
719
    TranslationBlock *tb1, **ptb;
720
    unsigned int n1;
721

    
722
    ptb = &tb->jmp_next[n];
723
    tb1 = *ptb;
724
    if (tb1) {
725
        /* find tb(n) in circular list */
726
        for(;;) {
727
            tb1 = *ptb;
728
            n1 = (long)tb1 & 3;
729
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
730
            if (n1 == n && tb1 == tb)
731
                break;
732
            if (n1 == 2) {
733
                ptb = &tb1->jmp_first;
734
            } else {
735
                ptb = &tb1->jmp_next[n1];
736
            }
737
        }
738
        /* now we can suppress tb(n) from the list */
739
        *ptb = tb->jmp_next[n];
740

    
741
        tb->jmp_next[n] = NULL;
742
    }
743
}
744

    
745
/* reset the jump entry 'n' of a TB so that it is not chained to
746
   another TB */
747
static inline void tb_reset_jump(TranslationBlock *tb, int n)
748
{
749
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
750
}
751

    
752
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
753
{
754
    CPUState *env;
755
    PageDesc *p;
756
    unsigned int h, n1;
757
    target_phys_addr_t phys_pc;
758
    TranslationBlock *tb1, *tb2;
759

    
760
    /* remove the TB from the hash list */
761
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
762
    h = tb_phys_hash_func(phys_pc);
763
    tb_remove(&tb_phys_hash[h], tb,
764
              offsetof(TranslationBlock, phys_hash_next));
765

    
766
    /* remove the TB from the page list */
767
    if (tb->page_addr[0] != page_addr) {
768
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
769
        tb_page_remove(&p->first_tb, tb);
770
        invalidate_page_bitmap(p);
771
    }
772
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
773
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
774
        tb_page_remove(&p->first_tb, tb);
775
        invalidate_page_bitmap(p);
776
    }
777

    
778
    tb_invalidated_flag = 1;
779

    
780
    /* remove the TB from each CPU's tb_jmp_cache */
781
    h = tb_jmp_cache_hash_func(tb->pc);
782
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
783
        if (env->tb_jmp_cache[h] == tb)
784
            env->tb_jmp_cache[h] = NULL;
785
    }
786

    
787
    /* suppress this TB from the two jump lists */
788
    tb_jmp_remove(tb, 0);
789
    tb_jmp_remove(tb, 1);
790

    
791
    /* suppress any remaining jumps to this TB */
792
    tb1 = tb->jmp_first;
793
    for(;;) {
794
        n1 = (long)tb1 & 3;
795
        if (n1 == 2)
796
            break;
797
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
798
        tb2 = tb1->jmp_next[n1];
799
        tb_reset_jump(tb1, n1);
800
        tb1->jmp_next[n1] = NULL;
801
        tb1 = tb2;
802
    }
803
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
804

    
805
    tb_phys_invalidate_count++;
806
}
807

    
808
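/* Set bits [start, start + len) in the bitmap 'tab'.  For example,
   set_bits(bitmap, 10, 4) marks bits 10..13 inclusive. */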
static inline void set_bits(uint8_t *tab, int start, int len)
809
{
810
    int end, mask, end1;
811

    
812
    end = start + len;
813
    tab += start >> 3;
814
    mask = 0xff << (start & 7);
815
    if ((start & ~7) == (end & ~7)) {
816
        if (start < end) {
817
            mask &= ~(0xff << (end & 7));
818
            *tab |= mask;
819
        }
820
    } else {
821
        *tab++ |= mask;
822
        start = (start + 8) & ~7;
823
        end1 = end & ~7;
824
        while (start < end1) {
825
            *tab++ = 0xff;
826
            start += 8;
827
        }
828
        if (start < end) {
829
            mask = ~(0xff << (end & 7));
830
            *tab |= mask;
831
        }
832
    }
833
}
834

    
835
static void build_page_bitmap(PageDesc *p)
836
{
837
    int n, tb_start, tb_end;
838
    TranslationBlock *tb;
839

    
840
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);
841

    
842
    tb = p->first_tb;
843
    while (tb != NULL) {
844
        n = (long)tb & 3;
845
        tb = (TranslationBlock *)((long)tb & ~3);
846
        /* NOTE: this is subtle as a TB may span two physical pages */
847
        if (n == 0) {
848
            /* NOTE: tb_end may be after the end of the page, but
849
               it is not a problem */
850
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
851
            tb_end = tb_start + tb->size;
852
            if (tb_end > TARGET_PAGE_SIZE)
853
                tb_end = TARGET_PAGE_SIZE;
854
        } else {
855
            tb_start = 0;
856
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
857
        }
858
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
859
        tb = tb->page_next[n];
860
    }
861
}
862

    
863
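/* Translate a new TB starting at guest 'pc'.  If the code buffer or the
   TB array is full, everything is flushed and the translation retried;
   the resulting TB is linked into the physical page tables, recording a
   second page when the code straddles a page boundary. */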
TranslationBlock *tb_gen_code(CPUState *env,
864
                              target_ulong pc, target_ulong cs_base,
865
                              int flags, int cflags)
866
{
867
    TranslationBlock *tb;
868
    uint8_t *tc_ptr;
869
    target_ulong phys_pc, phys_page2, virt_page2;
870
    int code_gen_size;
871

    
872
    phys_pc = get_phys_addr_code(env, pc);
873
    tb = tb_alloc(pc);
874
    if (!tb) {
875
        /* flush must be done */
876
        tb_flush(env);
877
        /* cannot fail at this point */
878
        tb = tb_alloc(pc);
879
        /* Don't forget to invalidate previous TB info.  */
880
        tb_invalidated_flag = 1;
881
    }
882
    tc_ptr = code_gen_ptr;
883
    tb->tc_ptr = tc_ptr;
884
    tb->cs_base = cs_base;
885
    tb->flags = flags;
886
    tb->cflags = cflags;
887
    cpu_gen_code(env, tb, &code_gen_size);
888
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
889

    
890
    /* check next page if needed */
891
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
892
    phys_page2 = -1;
893
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
894
        phys_page2 = get_phys_addr_code(env, virt_page2);
895
    }
896
    tb_link_phys(tb, phys_pc, phys_page2);
897
    return tb;
898
}
899

    
900
/* invalidate all TBs which intersect with the target physical page
901
   starting in range [start;end[. NOTE: start and end must refer to
902
   the same physical page. 'is_cpu_write_access' should be true if called
903
   from a real cpu write access: the virtual CPU will exit the current
904
   TB if code is modified inside this TB. */
905
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
906
                                   int is_cpu_write_access)
907
{
908
    TranslationBlock *tb, *tb_next, *saved_tb;
909
    CPUState *env = cpu_single_env;
910
    target_ulong tb_start, tb_end;
911
    PageDesc *p;
912
    int n;
913
#ifdef TARGET_HAS_PRECISE_SMC
914
    int current_tb_not_found = is_cpu_write_access;
915
    TranslationBlock *current_tb = NULL;
916
    int current_tb_modified = 0;
917
    target_ulong current_pc = 0;
918
    target_ulong current_cs_base = 0;
919
    int current_flags = 0;
920
#endif /* TARGET_HAS_PRECISE_SMC */
921

    
922
    p = page_find(start >> TARGET_PAGE_BITS);
923
    if (!p)
924
        return;
925
    if (!p->code_bitmap &&
926
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
927
        is_cpu_write_access) {
928
        /* build code bitmap */
929
        build_page_bitmap(p);
930
    }
931

    
932
    /* we remove all the TBs in the range [start, end[ */
933
    /* XXX: see if in some cases it could be faster to invalidate all the code */
934
    tb = p->first_tb;
935
    while (tb != NULL) {
936
        n = (long)tb & 3;
937
        tb = (TranslationBlock *)((long)tb & ~3);
938
        tb_next = tb->page_next[n];
939
        /* NOTE: this is subtle as a TB may span two physical pages */
940
        if (n == 0) {
941
            /* NOTE: tb_end may be after the end of the page, but
942
               it is not a problem */
943
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
944
            tb_end = tb_start + tb->size;
945
        } else {
946
            tb_start = tb->page_addr[1];
947
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
948
        }
949
        if (!(tb_end <= start || tb_start >= end)) {
950
#ifdef TARGET_HAS_PRECISE_SMC
951
            if (current_tb_not_found) {
952
                current_tb_not_found = 0;
953
                current_tb = NULL;
954
                if (env->mem_io_pc) {
955
                    /* now we have a real cpu fault */
956
                    current_tb = tb_find_pc(env->mem_io_pc);
957
                }
958
            }
959
            if (current_tb == tb &&
960
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
961
                /* If we are modifying the current TB, we must stop
962
                its execution. We could be more precise by checking
963
                that the modification is after the current PC, but it
964
                would require a specialized function to partially
965
                restore the CPU state */
966

    
967
                current_tb_modified = 1;
968
                cpu_restore_state(current_tb, env,
969
                                  env->mem_io_pc, NULL);
970
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
971
                                     &current_flags);
972
            }
973
#endif /* TARGET_HAS_PRECISE_SMC */
974
            /* we need to do that to handle the case where a signal
975
               occurs while doing tb_phys_invalidate() */
976
            saved_tb = NULL;
977
            if (env) {
978
                saved_tb = env->current_tb;
979
                env->current_tb = NULL;
980
            }
981
            tb_phys_invalidate(tb, -1);
982
            if (env) {
983
                env->current_tb = saved_tb;
984
                if (env->interrupt_request && env->current_tb)
985
                    cpu_interrupt(env, env->interrupt_request);
986
            }
987
        }
988
        tb = tb_next;
989
    }
990
#if !defined(CONFIG_USER_ONLY)
991
    /* if no code remaining, no need to continue to use slow writes */
992
    if (!p->first_tb) {
993
        invalidate_page_bitmap(p);
994
        if (is_cpu_write_access) {
995
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
996
        }
997
    }
998
#endif
999
#ifdef TARGET_HAS_PRECISE_SMC
1000
    if (current_tb_modified) {
1001
        /* we generate a block containing just the instruction
1002
           modifying the memory. It will ensure that it cannot modify
1003
           itself */
1004
        env->current_tb = NULL;
1005
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1006
        cpu_resume_from_signal(env, NULL);
1007
    }
1008
#endif
1009
}
1010

    
1011
/* len must be <= 8 and start must be a multiple of len */
1012
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
1013
{
1014
    PageDesc *p;
1015
    int offset, b;
1016
#if 0
1017
    if (1) {
1018
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1019
                  cpu_single_env->mem_io_vaddr, len,
1020
                  cpu_single_env->eip,
1021
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
1022
    }
1023
#endif
1024
    p = page_find(start >> TARGET_PAGE_BITS);
1025
    if (!p)
1026
        return;
1027
    if (p->code_bitmap) {
1028
        offset = start & ~TARGET_PAGE_MASK;
1029
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
1030
        if (b & ((1 << len) - 1))
1031
            goto do_invalidate;
1032
    } else {
1033
    do_invalidate:
1034
        tb_invalidate_phys_page_range(start, start + len, 1);
1035
    }
1036
}
1037

    
1038
#if !defined(CONFIG_SOFTMMU)
1039
static void tb_invalidate_phys_page(target_phys_addr_t addr,
1040
                                    unsigned long pc, void *puc)
1041
{
1042
    TranslationBlock *tb;
1043
    PageDesc *p;
1044
    int n;
1045
#ifdef TARGET_HAS_PRECISE_SMC
1046
    TranslationBlock *current_tb = NULL;
1047
    CPUState *env = cpu_single_env;
1048
    int current_tb_modified = 0;
1049
    target_ulong current_pc = 0;
1050
    target_ulong current_cs_base = 0;
1051
    int current_flags = 0;
1052
#endif
1053

    
1054
    addr &= TARGET_PAGE_MASK;
1055
    p = page_find(addr >> TARGET_PAGE_BITS);
1056
    if (!p)
1057
        return;
1058
    tb = p->first_tb;
1059
#ifdef TARGET_HAS_PRECISE_SMC
1060
    if (tb && pc != 0) {
1061
        current_tb = tb_find_pc(pc);
1062
    }
1063
#endif
1064
    while (tb != NULL) {
1065
        n = (long)tb & 3;
1066
        tb = (TranslationBlock *)((long)tb & ~3);
1067
#ifdef TARGET_HAS_PRECISE_SMC
1068
        if (current_tb == tb &&
1069
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
1070
                /* If we are modifying the current TB, we must stop
1071
                   its execution. We could be more precise by checking
1072
                   that the modification is after the current PC, but it
1073
                   would require a specialized function to partially
1074
                   restore the CPU state */
1075

    
1076
            current_tb_modified = 1;
1077
            cpu_restore_state(current_tb, env, pc, puc);
1078
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1079
                                 &current_flags);
1080
        }
1081
#endif /* TARGET_HAS_PRECISE_SMC */
1082
        tb_phys_invalidate(tb, addr);
1083
        tb = tb->page_next[n];
1084
    }
1085
    p->first_tb = NULL;
1086
#ifdef TARGET_HAS_PRECISE_SMC
1087
    if (current_tb_modified) {
1088
        /* we generate a block containing just the instruction
1089
           modifying the memory. It will ensure that it cannot modify
1090
           itself */
1091
        env->current_tb = NULL;
1092
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
1093
        cpu_resume_from_signal(env, puc);
1094
    }
1095
#endif
1096
}
1097
#endif
1098

    
1099
/* add the tb to the target page and protect it if necessary */
1100
static inline void tb_alloc_page(TranslationBlock *tb,
1101
                                 unsigned int n, target_ulong page_addr)
1102
{
1103
    PageDesc *p;
1104
    TranslationBlock *last_first_tb;
1105

    
1106
    tb->page_addr[n] = page_addr;
1107
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
1108
    tb->page_next[n] = p->first_tb;
1109
    last_first_tb = p->first_tb;
1110
    p->first_tb = (TranslationBlock *)((long)tb | n);
1111
    invalidate_page_bitmap(p);
1112

    
1113
#if defined(TARGET_HAS_SMC) || 1
1114

    
1115
#if defined(CONFIG_USER_ONLY)
1116
    if (p->flags & PAGE_WRITE) {
1117
        target_ulong addr;
1118
        PageDesc *p2;
1119
        int prot;
1120

    
1121
        /* force the host page as non-writable (writes will have a
1122
           page fault + mprotect overhead) */
1123
        page_addr &= qemu_host_page_mask;
1124
        prot = 0;
1125
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1126
            addr += TARGET_PAGE_SIZE) {
1127

    
1128
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1129
            if (!p2)
1130
                continue;
1131
            prot |= p2->flags;
1132
            p2->flags &= ~PAGE_WRITE;
1133
            page_get_flags(addr);
1134
          }
1135
        mprotect(g2h(page_addr), qemu_host_page_size,
1136
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1137
#ifdef DEBUG_TB_INVALIDATE
1138
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1139
               page_addr);
1140
#endif
1141
    }
1142
#else
1143
    /* if some code is already present, then the pages are already
1144
       protected. So we handle the case where only the first TB is
1145
       allocated in a physical page */
1146
    if (!last_first_tb) {
1147
        tlb_protect_code(page_addr);
1148
    }
1149
#endif
1150

    
1151
#endif /* TARGET_HAS_SMC */
1152
}
1153

    
1154
/* Allocate a new translation block. Flush the translation buffer if
1155
   too many translation blocks or too much generated code. */
1156
TranslationBlock *tb_alloc(target_ulong pc)
1157
{
1158
    TranslationBlock *tb;
1159

    
1160
    if (nb_tbs >= code_gen_max_blocks ||
1161
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1162
        return NULL;
1163
    tb = &tbs[nb_tbs++];
1164
    tb->pc = pc;
1165
    tb->cflags = 0;
1166
    return tb;
1167
}
1168

    
1169
void tb_free(TranslationBlock *tb)
1170
{
1171
    /* In practice this is mostly used for single-use temporary TBs.
1172
       Ignore the hard cases and just back up if this TB happens to
1173
       be the last one generated.  */
1174
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1175
        code_gen_ptr = tb->tc_ptr;
1176
        nb_tbs--;
1177
    }
1178
}
1179

    
1180
/* add a new TB and link it to the physical page tables. phys_page2 is
1181
   (-1) to indicate that only one page contains the TB. */
1182
void tb_link_phys(TranslationBlock *tb,
1183
                  target_ulong phys_pc, target_ulong phys_page2)
1184
{
1185
    unsigned int h;
1186
    TranslationBlock **ptb;
1187

    
1188
    /* Grab the mmap lock to stop another thread invalidating this TB
1189
       before we are done.  */
1190
    mmap_lock();
1191
    /* add in the physical hash table */
1192
    h = tb_phys_hash_func(phys_pc);
1193
    ptb = &tb_phys_hash[h];
1194
    tb->phys_hash_next = *ptb;
1195
    *ptb = tb;
1196

    
1197
    /* add in the page list */
1198
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1199
    if (phys_page2 != -1)
1200
        tb_alloc_page(tb, 1, phys_page2);
1201
    else
1202
        tb->page_addr[1] = -1;
1203

    
1204
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1205
    tb->jmp_next[0] = NULL;
1206
    tb->jmp_next[1] = NULL;
1207

    
1208
    /* init original jump addresses */
1209
    if (tb->tb_next_offset[0] != 0xffff)
1210
        tb_reset_jump(tb, 0);
1211
    if (tb->tb_next_offset[1] != 0xffff)
1212
        tb_reset_jump(tb, 1);
1213

    
1214
#ifdef DEBUG_TB_CHECK
1215
    tb_page_check();
1216
#endif
1217
    mmap_unlock();
1218
}
1219

    
1220
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1221
   tb[1].tc_ptr. Return NULL if not found */
1222
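/* This relies on TBs being allocated sequentially from the code buffer,
   so tbs[] is ordered by tc_ptr and a binary search is possible. */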
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1223
{
1224
    int m_min, m_max, m;
1225
    unsigned long v;
1226
    TranslationBlock *tb;
1227

    
1228
    if (nb_tbs <= 0)
1229
        return NULL;
1230
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1231
        tc_ptr >= (unsigned long)code_gen_ptr)
1232
        return NULL;
1233
    /* binary search (cf Knuth) */
1234
    m_min = 0;
1235
    m_max = nb_tbs - 1;
1236
    while (m_min <= m_max) {
1237
        m = (m_min + m_max) >> 1;
1238
        tb = &tbs[m];
1239
        v = (unsigned long)tb->tc_ptr;
1240
        if (v == tc_ptr)
1241
            return tb;
1242
        else if (tc_ptr < v) {
1243
            m_max = m - 1;
1244
        } else {
1245
            m_min = m + 1;
1246
        }
1247
    }
1248
    return &tbs[m_max];
1249
}
1250

    
1251
static void tb_reset_jump_recursive(TranslationBlock *tb);
1252

    
1253
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1254
{
1255
    TranslationBlock *tb1, *tb_next, **ptb;
1256
    unsigned int n1;
1257

    
1258
    tb1 = tb->jmp_next[n];
1259
    if (tb1 != NULL) {
1260
        /* find head of list */
1261
        for(;;) {
1262
            n1 = (long)tb1 & 3;
1263
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1264
            if (n1 == 2)
1265
                break;
1266
            tb1 = tb1->jmp_next[n1];
1267
        }
1268
        /* we are now sure that tb jumps to tb1 */
1269
        tb_next = tb1;
1270

    
1271
        /* remove tb from the jmp_first list */
1272
        ptb = &tb_next->jmp_first;
1273
        for(;;) {
1274
            tb1 = *ptb;
1275
            n1 = (long)tb1 & 3;
1276
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1277
            if (n1 == n && tb1 == tb)
1278
                break;
1279
            ptb = &tb1->jmp_next[n1];
1280
        }
1281
        *ptb = tb->jmp_next[n];
1282
        tb->jmp_next[n] = NULL;
1283

    
1284
        /* suppress the jump to next tb in generated code */
1285
        tb_reset_jump(tb, n);
1286

    
1287
        /* suppress jumps in the tb to which we could have jumped */
1288
        tb_reset_jump_recursive(tb_next);
1289
    }
1290
}
1291

    
1292
static void tb_reset_jump_recursive(TranslationBlock *tb)
1293
{
1294
    tb_reset_jump_recursive2(tb, 0);
1295
    tb_reset_jump_recursive2(tb, 1);
1296
}
1297

    
1298
#if defined(TARGET_HAS_ICE)
1299
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1300
{
1301
    target_phys_addr_t addr;
1302
    target_ulong pd;
1303
    ram_addr_t ram_addr;
1304
    PhysPageDesc *p;
1305

    
1306
    addr = cpu_get_phys_page_debug(env, pc);
1307
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1308
    if (!p) {
1309
        pd = IO_MEM_UNASSIGNED;
1310
    } else {
1311
        pd = p->phys_offset;
1312
    }
1313
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1314
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1315
}
1316
#endif
1317

    
1318
/* Add a watchpoint.  */
1319
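/* 'len' must be a power of two (1, 2, 4 or 8 bytes) and 'addr' aligned
   to it; len_mask = ~(len - 1), e.g. a 4-byte watchpoint has len_mask
   clearing the two low address bits. */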
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1320
                          int flags, CPUWatchpoint **watchpoint)
1321
{
1322
    target_ulong len_mask = ~(len - 1);
1323
    CPUWatchpoint *wp;
1324

    
1325
    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1326
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1327
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1328
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1329
        return -EINVAL;
1330
    }
1331
    wp = qemu_malloc(sizeof(*wp));
1332

    
1333
    wp->vaddr = addr;
1334
    wp->len_mask = len_mask;
1335
    wp->flags = flags;
1336

    
1337
    /* keep all GDB-injected watchpoints in front */
1338
    if (flags & BP_GDB)
1339
        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
1340
    else
1341
        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
1342

    
1343
    tlb_flush_page(env, addr);
1344

    
1345
    if (watchpoint)
1346
        *watchpoint = wp;
1347
    return 0;
1348
}
1349

    
1350
/* Remove a specific watchpoint.  */
1351
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1352
                          int flags)
1353
{
1354
    target_ulong len_mask = ~(len - 1);
1355
    CPUWatchpoint *wp;
1356

    
1357
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1358
        if (addr == wp->vaddr && len_mask == wp->len_mask
1359
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
1360
            cpu_watchpoint_remove_by_ref(env, wp);
1361
            return 0;
1362
        }
1363
    }
1364
    return -ENOENT;
1365
}
1366

    
1367
/* Remove a specific watchpoint by reference.  */
1368
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1369
{
1370
    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
1371

    
1372
    tlb_flush_page(env, watchpoint->vaddr);
1373

    
1374
    qemu_free(watchpoint);
1375
}
1376

    
1377
/* Remove all matching watchpoints.  */
1378
void cpu_watchpoint_remove_all(CPUState *env, int mask)
1379
{
1380
    CPUWatchpoint *wp, *next;
1381

    
1382
    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
1383
        if (wp->flags & mask)
1384
            cpu_watchpoint_remove_by_ref(env, wp);
1385
    }
1386
}
1387

    
1388
/* Add a breakpoint.  */
1389
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1390
                          CPUBreakpoint **breakpoint)
1391
{
1392
#if defined(TARGET_HAS_ICE)
1393
    CPUBreakpoint *bp;
1394

    
1395
    bp = qemu_malloc(sizeof(*bp));
1396

    
1397
    bp->pc = pc;
1398
    bp->flags = flags;
1399

    
1400
    /* keep all GDB-injected breakpoints in front */
1401
    if (flags & BP_GDB)
1402
        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
1403
    else
1404
        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
1405

    
1406
    breakpoint_invalidate(env, pc);
1407

    
1408
    if (breakpoint)
1409
        *breakpoint = bp;
1410
    return 0;
1411
#else
1412
    return -ENOSYS;
1413
#endif
1414
}
1415

    
1416
/* Remove a specific breakpoint.  */
1417
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
1418
{
1419
#if defined(TARGET_HAS_ICE)
1420
    CPUBreakpoint *bp;
1421

    
1422
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1423
        if (bp->pc == pc && bp->flags == flags) {
1424
            cpu_breakpoint_remove_by_ref(env, bp);
1425
            return 0;
1426
        }
1427
    }
1428
    return -ENOENT;
1429
#else
1430
    return -ENOSYS;
1431
#endif
1432
}
1433

    
1434
/* Remove a specific breakpoint by reference.  */
1435
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
1436
{
1437
#if defined(TARGET_HAS_ICE)
1438
    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
1439

    
1440
    breakpoint_invalidate(env, breakpoint->pc);
1441

    
1442
    qemu_free(breakpoint);
1443
#endif
1444
}
1445

    
1446
/* Remove all matching breakpoints. */
1447
void cpu_breakpoint_remove_all(CPUState *env, int mask)
1448
{
1449
#if defined(TARGET_HAS_ICE)
1450
    CPUBreakpoint *bp, *next;
1451

    
1452
    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
1453
        if (bp->flags & mask)
1454
            cpu_breakpoint_remove_by_ref(env, bp);
1455
    }
1456
#endif
1457
}
1458

    
1459
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1460
   CPU loop after each instruction */
1461
void cpu_single_step(CPUState *env, int enabled)
1462
{
1463
#if defined(TARGET_HAS_ICE)
1464
    if (env->singlestep_enabled != enabled) {
1465
        env->singlestep_enabled = enabled;
1466
        if (kvm_enabled())
1467
            kvm_update_guest_debug(env, 0);
1468
        else {
1469
            /* must flush all the translated code to avoid inconsistencies */
1470
            /* XXX: only flush what is necessary */
1471
            tb_flush(env);
1472
        }
1473
    }
1474
#endif
1475
}
1476

    
1477
/* enable or disable low-level logging */
1478
void cpu_set_log(int log_flags)
1479
{
1480
    loglevel = log_flags;
1481
    if (loglevel && !logfile) {
1482
        logfile = fopen(logfilename, log_append ? "a" : "w");
1483
        if (!logfile) {
1484
            perror(logfilename);
1485
            _exit(1);
1486
        }
1487
#if !defined(CONFIG_SOFTMMU)
1488
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
1489
        {
1490
            static char logfile_buf[4096];
1491
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1492
        }
1493
#else
1494
        setvbuf(logfile, NULL, _IOLBF, 0);
1495
#endif
1496
        log_append = 1;
1497
    }
1498
    if (!loglevel && logfile) {
1499
        fclose(logfile);
1500
        logfile = NULL;
1501
    }
1502
}
1503

    
1504
void cpu_set_log_filename(const char *filename)
1505
{
1506
    logfilename = strdup(filename);
1507
    if (logfile) {
1508
        fclose(logfile);
1509
        logfile = NULL;
1510
    }
1511
    cpu_set_log(loglevel);
1512
}
1513

    
1514
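/* Force the CPU out of its chained translated code: resetting the jump
   links of the TB being executed makes control return to the main loop,
   where interrupt_request / exit_request are examined. */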
static void cpu_unlink_tb(CPUState *env)
1515
{
1516
#if defined(USE_NPTL)
1517
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
1518
       problem and hope the cpu will stop of its own accord.  For userspace
1519
       emulation this often isn't actually as bad as it sounds.  Often
1520
       signals are used primarily to interrupt blocking syscalls.  */
1521
#else
1522
    TranslationBlock *tb;
1523
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1524

    
1525
    tb = env->current_tb;
1526
    /* if the cpu is currently executing code, we must unlink it and
1527
       all the potentially executing TBs */
1528
    if (tb && !testandset(&interrupt_lock)) {
1529
        env->current_tb = NULL;
1530
        tb_reset_jump_recursive(tb);
1531
        resetlock(&interrupt_lock);
1532
    }
1533
#endif
1534
}
1535

    
1536
/* mask must never be zero, except for A20 change call */
1537
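/* When icount is active the TB is not unlinked; instead icount_decr's
   high half is set to 0xffff, which makes the 32-bit counter negative so
   the generated code exits at its next instruction-count check. */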
void cpu_interrupt(CPUState *env, int mask)
1538
{
1539
    int old_mask;
1540

    
1541
    old_mask = env->interrupt_request;
1542
    env->interrupt_request |= mask;
1543

    
1544
    if (use_icount) {
1545
        env->icount_decr.u16.high = 0xffff;
1546
#ifndef CONFIG_USER_ONLY
1547
        if (!can_do_io(env)
1548
            && (mask & ~old_mask) != 0) {
1549
            cpu_abort(env, "Raised interrupt while not in I/O function");
1550
        }
1551
#endif
1552
    } else {
1553
        cpu_unlink_tb(env);
1554
    }
1555
}
1556

    
1557
void cpu_reset_interrupt(CPUState *env, int mask)
1558
{
1559
    env->interrupt_request &= ~mask;
1560
}
1561

    
1562
void cpu_exit(CPUState *env)
1563
{
1564
    env->exit_request = 1;
1565
    cpu_unlink_tb(env);
1566
}
1567

    
1568
const CPULogItem cpu_log_items[] = {
1569
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1570
      "show generated host assembly code for each compiled TB" },
1571
    { CPU_LOG_TB_IN_ASM, "in_asm",
1572
      "show target assembly code for each compiled TB" },
1573
    { CPU_LOG_TB_OP, "op",
1574
      "show micro ops for each compiled TB" },
1575
    { CPU_LOG_TB_OP_OPT, "op_opt",
1576
      "show micro ops "
1577
#ifdef TARGET_I386
1578
      "before eflags optimization and "
1579
#endif
1580
      "after liveness analysis" },
1581
    { CPU_LOG_INT, "int",
1582
      "show interrupts/exceptions in short format" },
1583
    { CPU_LOG_EXEC, "exec",
1584
      "show trace before each executed TB (lots of logs)" },
1585
    { CPU_LOG_TB_CPU, "cpu",
1586
      "show CPU state before block translation" },
1587
#ifdef TARGET_I386
1588
    { CPU_LOG_PCALL, "pcall",
1589
      "show protected mode far calls/returns/exceptions" },
1590
    { CPU_LOG_RESET, "cpu_reset",
1591
      "show CPU state before CPU resets" },
1592
#endif
1593
#ifdef DEBUG_IOPORT
1594
    { CPU_LOG_IOPORT, "ioport",
1595
      "show all i/o ports accesses" },
1596
#endif
1597
    { 0, NULL, NULL },
1598
};
1599

    
1600
static int cmp1(const char *s1, int n, const char *s2)
1601
{
1602
    if (strlen(s2) != n)
1603
        return 0;
1604
    return memcmp(s1, s2, n) == 0;
1605
}
1606

    
1607
/* takes a comma-separated list of log masks. Returns 0 on error. */
1608
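/* e.g. "in_asm,exec" yields CPU_LOG_TB_IN_ASM | CPU_LOG_EXEC, and the
   special name "all" selects every entry of cpu_log_items[]. */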
int cpu_str_to_log_mask(const char *str)
1609
{
1610
    const CPULogItem *item;
1611
    int mask;
1612
    const char *p, *p1;
1613

    
1614
    p = str;
1615
    mask = 0;
1616
    for(;;) {
1617
        p1 = strchr(p, ',');
1618
        if (!p1)
1619
            p1 = p + strlen(p);
1620
        if(cmp1(p,p1-p,"all")) {
1621
                for(item = cpu_log_items; item->mask != 0; item++) {
1622
                        mask |= item->mask;
1623
                }
1624
        } else {
1625
        for(item = cpu_log_items; item->mask != 0; item++) {
1626
            if (cmp1(p, p1 - p, item->name))
1627
                goto found;
1628
        }
1629
        return 0;
1630
        }
1631
    found:
1632
        mask |= item->mask;
1633
        if (*p1 != ',')
1634
            break;
1635
        p = p1 + 1;
1636
    }
1637
    return mask;
1638
}
1639

    
1640
void cpu_abort(CPUState *env, const char *fmt, ...)
1641
{
1642
    va_list ap;
1643
    va_list ap2;
1644

    
1645
    va_start(ap, fmt);
1646
    va_copy(ap2, ap);
1647
    fprintf(stderr, "qemu: fatal: ");
1648
    vfprintf(stderr, fmt, ap);
1649
    fprintf(stderr, "\n");
1650
#ifdef TARGET_I386
1651
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1652
#else
1653
    cpu_dump_state(env, stderr, fprintf, 0);
1654
#endif
1655
    if (qemu_log_enabled()) {
1656
        qemu_log("qemu: fatal: ");
1657
        qemu_log_vprintf(fmt, ap2);
1658
        qemu_log("\n");
1659
#ifdef TARGET_I386
1660
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
1661
#else
1662
        log_cpu_state(env, 0);
1663
#endif
1664
        qemu_log_flush();
1665
        qemu_log_close();
1666
    }
1667
    va_end(ap2);
1668
    va_end(ap);
1669
    abort();
1670
}
1671

    
1672
CPUState *cpu_copy(CPUState *env)
1673
{
1674
    CPUState *new_env = cpu_init(env->cpu_model_str);
1675
    CPUState *next_cpu = new_env->next_cpu;
1676
    int cpu_index = new_env->cpu_index;
1677
#if defined(TARGET_HAS_ICE)
1678
    CPUBreakpoint *bp;
1679
    CPUWatchpoint *wp;
1680
#endif
1681

    
1682
    memcpy(new_env, env, sizeof(CPUState));
1683

    
1684
    /* Preserve chaining and index. */
1685
    new_env->next_cpu = next_cpu;
1686
    new_env->cpu_index = cpu_index;
1687

    
1688
    /* Clone all break/watchpoints.
1689
       Note: Once we support ptrace with hw-debug register access, make sure
1690
       BP_CPU break/watchpoints are handled correctly on clone. */
1691
    TAILQ_INIT(&env->breakpoints);
1692
    TAILQ_INIT(&env->watchpoints);
1693
#if defined(TARGET_HAS_ICE)
1694
    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
1695
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1696
    }
1697
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
1698
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1699
                              wp->flags, NULL);
1700
    }
1701
#endif
1702

    
1703
    return new_env;
1704
}
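
/* Sketch of a hypothetical caller, not in this file: user-mode fork()
   emulation needs an independent CPUState for the child that starts
   from the parent's registers; cpu_copy() provides that while also
   re-inserting the parent's break/watchpoints into the new state. */
static CPUState *example_clone_for_child(CPUState *parent)
{
    CPUState *child = cpu_copy(parent);

    /* from here on the child can diverge (its own PC, TLB and TB
       caches) without disturbing the parent */
    return child;
}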
1705

    
1706
#if !defined(CONFIG_USER_ONLY)
1707

    
1708
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1709
{
1710
    unsigned int i;
1711

    
1712
    /* Discard jump cache entries for any tb which might potentially
1713
       overlap the flushed page.  */
1714
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1715
    memset (&env->tb_jmp_cache[i], 0, 
1716
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1717

    
1718
    i = tb_jmp_cache_hash_page(addr);
1719
    memset (&env->tb_jmp_cache[i], 0, 
1720
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1721
}
1722

    
1723
/* NOTE: if flush_global is true, also flush global entries (not
1724
   implemented yet) */
1725
void tlb_flush(CPUState *env, int flush_global)
1726
{
1727
    int i;
1728

    
1729
#if defined(DEBUG_TLB)
1730
    printf("tlb_flush:\n");
1731
#endif
1732
    /* must reset current TB so that interrupts cannot modify the
1733
       links while we are modifying them */
1734
    env->current_tb = NULL;
1735

    
1736
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1737
        env->tlb_table[0][i].addr_read = -1;
1738
        env->tlb_table[0][i].addr_write = -1;
1739
        env->tlb_table[0][i].addr_code = -1;
1740
        env->tlb_table[1][i].addr_read = -1;
1741
        env->tlb_table[1][i].addr_write = -1;
1742
        env->tlb_table[1][i].addr_code = -1;
1743
#if (NB_MMU_MODES >= 3)
1744
        env->tlb_table[2][i].addr_read = -1;
1745
        env->tlb_table[2][i].addr_write = -1;
1746
        env->tlb_table[2][i].addr_code = -1;
1747
#endif
1748
#if (NB_MMU_MODES >= 4)
1749
        env->tlb_table[3][i].addr_read = -1;
1750
        env->tlb_table[3][i].addr_write = -1;
1751
        env->tlb_table[3][i].addr_code = -1;
1752
#endif
1753
#if (NB_MMU_MODES >= 5)
1754
        env->tlb_table[4][i].addr_read = -1;
1755
        env->tlb_table[4][i].addr_write = -1;
1756
        env->tlb_table[4][i].addr_code = -1;
1757
#endif
1758

    
1759
    }
1760

    
1761
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1762

    
1763
#ifdef CONFIG_KQEMU
1764
    if (env->kqemu_enabled) {
1765
        kqemu_flush(env, flush_global);
1766
    }
1767
#endif
1768
    tlb_flush_count++;
1769
}
1770

    
1771
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1772
{
1773
    if (addr == (tlb_entry->addr_read &
1774
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1775
        addr == (tlb_entry->addr_write &
1776
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1777
        addr == (tlb_entry->addr_code &
1778
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1779
        tlb_entry->addr_read = -1;
1780
        tlb_entry->addr_write = -1;
1781
        tlb_entry->addr_code = -1;
1782
    }
1783
}
1784

    
1785
void tlb_flush_page(CPUState *env, target_ulong addr)
1786
{
1787
    int i;
1788

    
1789
#if defined(DEBUG_TLB)
1790
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1791
#endif
1792
    /* must reset current TB so that interrupts cannot modify the
1793
       links while we are modifying them */
1794
    env->current_tb = NULL;
1795

    
1796
    addr &= TARGET_PAGE_MASK;
1797
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1798
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1799
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1800
#if (NB_MMU_MODES >= 3)
1801
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1802
#endif
1803
#if (NB_MMU_MODES >= 4)
1804
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1805
#endif
1806
#if (NB_MMU_MODES >= 5)
1807
    tlb_flush_entry(&env->tlb_table[4][i], addr);
1808
#endif
1809

    
1810
    tlb_flush_jmp_cache(env, addr);
1811

    
1812
#ifdef CONFIG_KQEMU
1813
    if (env->kqemu_enabled) {
1814
        kqemu_flush_page(env, addr);
1815
    }
1816
#endif
1817
}
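
/* Sketch of hypothetical target code, not in this file: a target that
   invalidates a single guest mapping (an INVLPG-style instruction)
   calls tlb_flush_page() instead of the full tlb_flush(), so only one
   index per MMU mode is cleared.  The helper name is illustrative. */
static void example_invalidate_one_mapping(CPUState *env, target_ulong va)
{
    tlb_flush_page(env, va & TARGET_PAGE_MASK);
}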
1818

    
1819
/* update the TLBs so that writes to code in the virtual page 'addr'
1820
   can be detected */
1821
static void tlb_protect_code(ram_addr_t ram_addr)
1822
{
1823
    cpu_physical_memory_reset_dirty(ram_addr,
1824
                                    ram_addr + TARGET_PAGE_SIZE,
1825
                                    CODE_DIRTY_FLAG);
1826
}
1827

    
1828
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1829
   tested for self modifying code */
1830
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1831
                                    target_ulong vaddr)
1832
{
1833
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1834
}
1835

    
1836
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1837
                                         unsigned long start, unsigned long length)
1838
{
1839
    unsigned long addr;
1840
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1841
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1842
        if ((addr - start) < length) {
1843
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
1844
        }
1845
    }
1846
}
1847

    
1848
/* Note: start and end must be within the same ram block.  */
1849
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1850
                                     int dirty_flags)
1851
{
1852
    CPUState *env;
1853
    unsigned long length, start1;
1854
    int i, mask, len;
1855
    uint8_t *p;
1856

    
1857
    start &= TARGET_PAGE_MASK;
1858
    end = TARGET_PAGE_ALIGN(end);
1859

    
1860
    length = end - start;
1861
    if (length == 0)
1862
        return;
1863
    len = length >> TARGET_PAGE_BITS;
1864
#ifdef CONFIG_KQEMU
1865
    /* XXX: should not depend on cpu context */
1866
    env = first_cpu;
1867
    if (env->kqemu_enabled) {
1868
        ram_addr_t addr;
1869
        addr = start;
1870
        for(i = 0; i < len; i++) {
1871
            kqemu_set_notdirty(env, addr);
1872
            addr += TARGET_PAGE_SIZE;
1873
        }
1874
    }
1875
#endif
1876
    mask = ~dirty_flags;
1877
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
1878
    for(i = 0; i < len; i++)
1879
        p[i] &= mask;
1880

    
1881
    /* we modify the TLB cache so that the dirty bit will be set again
1882
       when accessing the range */
1883
    start1 = (unsigned long)qemu_get_ram_ptr(start);
1884
    /* Check that we don't span multiple blocks - this breaks the
1885
       address comparisons below.  */
1886
    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
1887
            != (end - 1) - start) {
1888
        abort();
1889
    }
1890

    
1891
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
1892
        for(i = 0; i < CPU_TLB_SIZE; i++)
1893
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
1894
        for(i = 0; i < CPU_TLB_SIZE; i++)
1895
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
1896
#if (NB_MMU_MODES >= 3)
1897
        for(i = 0; i < CPU_TLB_SIZE; i++)
1898
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
1899
#endif
1900
#if (NB_MMU_MODES >= 4)
1901
        for(i = 0; i < CPU_TLB_SIZE; i++)
1902
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
1903
#endif
1904
#if (NB_MMU_MODES >= 5)
1905
        for(i = 0; i < CPU_TLB_SIZE; i++)
1906
            tlb_reset_dirty_range(&env->tlb_table[4][i], start1, length);
1907
#endif
1908
    }
1909
}
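
/* Sketch of a hypothetical consumer of the dirty bitmap, not in this
   file: display code redraws only pages whose VGA_DIRTY_FLAG is still
   set and then clears the flag over the whole framebuffer with the
   function above.  cpu_physical_memory_get_dirty() and VGA_DIRTY_FLAG
   are assumed to be the usual cpu-all.h helpers; the function below is
   invented for illustration. */
static void example_refresh_display(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_base; addr < fb_base + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* ... redraw the scanlines backed by this page ... */
        }
    }
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size,
                                    VGA_DIRTY_FLAG);
}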
1910

    
1911
int cpu_physical_memory_set_dirty_tracking(int enable)
1912
{
1913
    in_migration = enable;
1914
    return 0;
1915
}
1916

    
1917
int cpu_physical_memory_get_dirty_tracking(void)
1918
{
1919
    return in_migration;
1920
}
1921

    
1922
void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr)
1923
{
1924
    if (kvm_enabled())
1925
        kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
1926
}
1927

    
1928
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
1929
{
1930
    ram_addr_t ram_addr;
1931
    void *p;
1932

    
1933
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1934
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
1935
            + tlb_entry->addend);
1936
        ram_addr = qemu_ram_addr_from_host(p);
1937
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
1938
            tlb_entry->addr_write |= TLB_NOTDIRTY;
1939
        }
1940
    }
1941
}
1942

    
1943
/* update the TLB according to the current state of the dirty bits */
1944
void cpu_tlb_update_dirty(CPUState *env)
1945
{
1946
    int i;
1947
    for(i = 0; i < CPU_TLB_SIZE; i++)
1948
        tlb_update_dirty(&env->tlb_table[0][i]);
1949
    for(i = 0; i < CPU_TLB_SIZE; i++)
1950
        tlb_update_dirty(&env->tlb_table[1][i]);
1951
#if (NB_MMU_MODES >= 3)
1952
    for(i = 0; i < CPU_TLB_SIZE; i++)
1953
        tlb_update_dirty(&env->tlb_table[2][i]);
1954
#endif
1955
#if (NB_MMU_MODES >= 4)
1956
    for(i = 0; i < CPU_TLB_SIZE; i++)
1957
        tlb_update_dirty(&env->tlb_table[3][i]);
1958
#endif
1959
#if (NB_MMU_MODES >= 5)
1960
    for(i = 0; i < CPU_TLB_SIZE; i++)
1961
        tlb_update_dirty(&env->tlb_table[4][i]);
1962
#endif
1963
}
1964

    
1965
static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
1966
{
1967
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
1968
        tlb_entry->addr_write = vaddr;
1969
}
1970

    
1971
/* update the TLB corresponding to virtual page vaddr
1972
   so that it is no longer dirty */
1973
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
1974
{
1975
    int i;
1976

    
1977
    vaddr &= TARGET_PAGE_MASK;
1978
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1979
    tlb_set_dirty1(&env->tlb_table[0][i], vaddr);
1980
    tlb_set_dirty1(&env->tlb_table[1][i], vaddr);
1981
#if (NB_MMU_MODES >= 3)
1982
    tlb_set_dirty1(&env->tlb_table[2][i], vaddr);
1983
#endif
1984
#if (NB_MMU_MODES >= 4)
1985
    tlb_set_dirty1(&env->tlb_table[3][i], vaddr);
1986
#endif
1987
#if (NB_MMU_MODES >= 5)
1988
    tlb_set_dirty1(&env->tlb_table[4][i], vaddr);
1989
#endif
1990
}
1991

    
1992
/* add a new TLB entry. At most one entry for a given virtual address
1993
   is permitted. Return 0 if OK or 2 if the page could not be mapped
1994
   (can only happen in non SOFTMMU mode for I/O pages or pages
1995
   conflicting with the host address space). */
1996
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
1997
                      target_phys_addr_t paddr, int prot,
1998
                      int mmu_idx, int is_softmmu)
1999
{
2000
    PhysPageDesc *p;
2001
    unsigned long pd;
2002
    unsigned int index;
2003
    target_ulong address;
2004
    target_ulong code_address;
2005
    target_phys_addr_t addend;
2006
    int ret;
2007
    CPUTLBEntry *te;
2008
    CPUWatchpoint *wp;
2009
    target_phys_addr_t iotlb;
2010

    
2011
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
2012
    if (!p) {
2013
        pd = IO_MEM_UNASSIGNED;
2014
    } else {
2015
        pd = p->phys_offset;
2016
    }
2017
#if defined(DEBUG_TLB)
2018
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
2019
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
2020
#endif
2021

    
2022
    ret = 0;
2023
    address = vaddr;
2024
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
2025
        /* IO memory case (romd handled later) */
2026
        address |= TLB_MMIO;
2027
    }
2028
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
2029
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
2030
        /* Normal RAM.  */
2031
        iotlb = pd & TARGET_PAGE_MASK;
2032
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
2033
            iotlb |= IO_MEM_NOTDIRTY;
2034
        else
2035
            iotlb |= IO_MEM_ROM;
2036
    } else {
2037
        /* IO handlers are currently passed a physical address.
2038
           It would be nice to pass an offset from the base address
2039
           of that region.  This would avoid having to special case RAM,
2040
           and avoid full address decoding in every device.
2041
           We can't use the high bits of pd for this because
2042
           IO_MEM_ROMD uses these as a ram address.  */
2043
        iotlb = (pd & ~TARGET_PAGE_MASK);
2044
        if (p) {
2045
            iotlb += p->region_offset;
2046
        } else {
2047
            iotlb += paddr;
2048
        }
2049
    }
2050

    
2051
    code_address = address;
2052
    /* Make accesses to pages with watchpoints go via the
2053
       watchpoint trap routines.  */
2054
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2055
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
2056
            iotlb = io_mem_watch + paddr;
2057
            /* TODO: The memory case can be optimized by not trapping
2058
               reads of pages with a write breakpoint.  */
2059
            address |= TLB_MMIO;
2060
        }
2061
    }
2062

    
2063
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
2064
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
2065
    te = &env->tlb_table[mmu_idx][index];
2066
    te->addend = addend - vaddr;
2067
    if (prot & PAGE_READ) {
2068
        te->addr_read = address;
2069
    } else {
2070
        te->addr_read = -1;
2071
    }
2072

    
2073
    if (prot & PAGE_EXEC) {
2074
        te->addr_code = code_address;
2075
    } else {
2076
        te->addr_code = -1;
2077
    }
2078
    if (prot & PAGE_WRITE) {
2079
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
2080
            (pd & IO_MEM_ROMD)) {
2081
            /* Write access calls the I/O callback.  */
2082
            te->addr_write = address | TLB_MMIO;
2083
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
2084
                   !cpu_physical_memory_is_dirty(pd)) {
2085
            te->addr_write = address | TLB_NOTDIRTY;
2086
        } else {
2087
            te->addr_write = address;
2088
        }
2089
    } else {
2090
        te->addr_write = -1;
2091
    }
2092
    return ret;
2093
}
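
/* Sketch of a hypothetical target MMU handler, not in this file: after
   walking its page tables, a target installs the translation with
   tlb_set_page_exec() from its MMU fault handler.  The identity
   translation and the helper name below are purely illustrative. */
static int example_install_mapping(CPUState *env, target_ulong vaddr,
                                   int mmu_idx)
{
    target_phys_addr_t paddr = vaddr & TARGET_PAGE_MASK;  /* 1:1 mapping */
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    return tlb_set_page_exec(env, vaddr & TARGET_PAGE_MASK, paddr,
                             prot, mmu_idx, 1 /* is_softmmu */);
}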
2094

    
2095
#else
2096

    
2097
void tlb_flush(CPUState *env, int flush_global)
2098
{
2099
}
2100

    
2101
void tlb_flush_page(CPUState *env, target_ulong addr)
2102
{
2103
}
2104

    
2105
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
2106
                      target_phys_addr_t paddr, int prot,
2107
                      int mmu_idx, int is_softmmu)
2108
{
2109
    return 0;
2110
}
2111

    
2112
/* dump memory mappings */
2113
void page_dump(FILE *f)
2114
{
2115
    unsigned long start, end;
2116
    int i, j, prot, prot1;
2117
    PageDesc *p;
2118

    
2119
    fprintf(f, "%-8s %-8s %-8s %s\n",
2120
            "start", "end", "size", "prot");
2121
    start = -1;
2122
    end = -1;
2123
    prot = 0;
2124
    for(i = 0; i <= L1_SIZE; i++) {
2125
        if (i < L1_SIZE)
2126
            p = l1_map[i];
2127
        else
2128
            p = NULL;
2129
        for(j = 0;j < L2_SIZE; j++) {
2130
            if (!p)
2131
                prot1 = 0;
2132
            else
2133
                prot1 = p[j].flags;
2134
            if (prot1 != prot) {
2135
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
2136
                if (start != -1) {
2137
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
2138
                            start, end, end - start,
2139
                            prot & PAGE_READ ? 'r' : '-',
2140
                            prot & PAGE_WRITE ? 'w' : '-',
2141
                            prot & PAGE_EXEC ? 'x' : '-');
2142
                }
2143
                if (prot1 != 0)
2144
                    start = end;
2145
                else
2146
                    start = -1;
2147
                prot = prot1;
2148
            }
2149
            if (!p)
2150
                break;
2151
        }
2152
    }
2153
}
2154

    
2155
int page_get_flags(target_ulong address)
2156
{
2157
    PageDesc *p;
2158

    
2159
    p = page_find(address >> TARGET_PAGE_BITS);
2160
    if (!p)
2161
        return 0;
2162
    return p->flags;
2163
}
2164

    
2165
/* modify the flags of a page and invalidate the code if
2166
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
2167
   depending on PAGE_WRITE */
2168
void page_set_flags(target_ulong start, target_ulong end, int flags)
2169
{
2170
    PageDesc *p;
2171
    target_ulong addr;
2172

    
2173
    /* mmap_lock should already be held.  */
2174
    start = start & TARGET_PAGE_MASK;
2175
    end = TARGET_PAGE_ALIGN(end);
2176
    if (flags & PAGE_WRITE)
2177
        flags |= PAGE_WRITE_ORG;
2178
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2179
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
2180
        /* We may be called for host regions that are outside guest
2181
           address space.  */
2182
        if (!p)
2183
            return;
2184
        /* if the write protection is set, then we invalidate the code
2185
           inside */
2186
        if (!(p->flags & PAGE_WRITE) &&
2187
            (flags & PAGE_WRITE) &&
2188
            p->first_tb) {
2189
            tb_invalidate_phys_page(addr, 0, NULL);
2190
        }
2191
        p->flags = flags;
2192
    }
2193
}
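
/* Sketch of a hypothetical caller, not in this file: user-mode mmap()
   emulation records the protection of freshly mapped guest pages so
   that page_get_flags()/page_check_range() and the self-modifying-code
   machinery see them.  The prot translation is simplified. */
static void example_note_guest_mmap(target_ulong start, target_ulong len,
                                    int host_prot)
{
    int flags = PAGE_VALID;

    if (host_prot & PROT_READ)
        flags |= PAGE_READ;
    if (host_prot & PROT_WRITE)
        flags |= PAGE_WRITE;
    if (host_prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(start, start + len, flags);
}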
2194

    
2195
int page_check_range(target_ulong start, target_ulong len, int flags)
2196
{
2197
    PageDesc *p;
2198
    target_ulong end;
2199
    target_ulong addr;
2200

    
2201
    if (start + len < start)
2202
        /* we've wrapped around */
2203
        return -1;
2204

    
2205
    end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2206
    start = start & TARGET_PAGE_MASK;
2207

    
2208
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
2209
        p = page_find(addr >> TARGET_PAGE_BITS);
2210
        if( !p )
2211
            return -1;
2212
        if( !(p->flags & PAGE_VALID) )
2213
            return -1;
2214

    
2215
        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
2216
            return -1;
2217
        if (flags & PAGE_WRITE) {
2218
            if (!(p->flags & PAGE_WRITE_ORG))
2219
                return -1;
2220
            /* unprotect the page if it was put read-only because it
2221
               contains translated code */
2222
            if (!(p->flags & PAGE_WRITE)) {
2223
                if (!page_unprotect(addr, 0, NULL))
2224
                    return -1;
2225
            }
2226
            return 0;
2227
        }
2228
    }
2229
    return 0;
2230
}
2231

    
2232
/* called from signal handler: invalidate the code and unprotect the
2233
   page. Return TRUE if the fault was successfully handled. */
2234
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
2235
{
2236
    unsigned int page_index, prot, pindex;
2237
    PageDesc *p, *p1;
2238
    target_ulong host_start, host_end, addr;
2239

    
2240
    /* Technically this isn't safe inside a signal handler.  However we
2241
       know this only ever happens in a synchronous SEGV handler, so in
2242
       practice it seems to be ok.  */
2243
    mmap_lock();
2244

    
2245
    host_start = address & qemu_host_page_mask;
2246
    page_index = host_start >> TARGET_PAGE_BITS;
2247
    p1 = page_find(page_index);
2248
    if (!p1) {
2249
        mmap_unlock();
2250
        return 0;
2251
    }
2252
    host_end = host_start + qemu_host_page_size;
2253
    p = p1;
2254
    prot = 0;
2255
    for(addr = host_start;addr < host_end; addr += TARGET_PAGE_SIZE) {
2256
        prot |= p->flags;
2257
        p++;
2258
    }
2259
    /* if the page was really writable, then we change its
2260
       protection back to writable */
2261
    if (prot & PAGE_WRITE_ORG) {
2262
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
2263
        if (!(p1[pindex].flags & PAGE_WRITE)) {
2264
            mprotect((void *)g2h(host_start), qemu_host_page_size,
2265
                     (prot & PAGE_BITS) | PAGE_WRITE);
2266
            p1[pindex].flags |= PAGE_WRITE;
2267
            /* and since the content will be modified, we must invalidate
2268
               the corresponding translated code. */
2269
            tb_invalidate_phys_page(address, pc, puc);
2270
#ifdef DEBUG_TB_CHECK
2271
            tb_invalidate_check(address);
2272
#endif
2273
            mmap_unlock();
2274
            return 1;
2275
        }
2276
    }
2277
    mmap_unlock();
2278
    return 0;
2279
}
2280

    
2281
static inline void tlb_set_dirty(CPUState *env,
2282
                                 unsigned long addr, target_ulong vaddr)
2283
{
2284
}
2285
#endif /* defined(CONFIG_USER_ONLY) */
2286

    
2287
#if !defined(CONFIG_USER_ONLY)
2288

    
2289
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2290
                             ram_addr_t memory, ram_addr_t region_offset);
2291
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2292
                           ram_addr_t orig_memory, ram_addr_t region_offset);
2293
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
2294
                      need_subpage)                                     \
2295
    do {                                                                \
2296
        if (addr > start_addr)                                          \
2297
            start_addr2 = 0;                                            \
2298
        else {                                                          \
2299
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
2300
            if (start_addr2 > 0)                                        \
2301
                need_subpage = 1;                                       \
2302
        }                                                               \
2303
                                                                        \
2304
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
2305
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
2306
        else {                                                          \
2307
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
2308
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
2309
                need_subpage = 1;                                       \
2310
        }                                                               \
2311
    } while (0)
2312

    
2313
/* register physical memory. 'size' must be a multiple of the target
2314
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
2315
   io memory page.  The address used when calling the IO function is
2316
   the offset from the start of the region, plus region_offset.  Both
2317
   start_region and regon_offset are rounded down to a page boundary
2318
   before calculating this offset.  This should not be a problem unless
2319
   the low bits of start_addr and region_offset differ.  */
2320
void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
2321
                                         ram_addr_t size,
2322
                                         ram_addr_t phys_offset,
2323
                                         ram_addr_t region_offset)
2324
{
2325
    target_phys_addr_t addr, end_addr;
2326
    PhysPageDesc *p;
2327
    CPUState *env;
2328
    ram_addr_t orig_size = size;
2329
    void *subpage;
2330

    
2331
#ifdef CONFIG_KQEMU
2332
    /* XXX: should not depend on cpu context */
2333
    env = first_cpu;
2334
    if (env->kqemu_enabled) {
2335
        kqemu_set_phys_mem(start_addr, size, phys_offset);
2336
    }
2337
#endif
2338
    if (kvm_enabled())
2339
        kvm_set_phys_mem(start_addr, size, phys_offset);
2340

    
2341
    if (phys_offset == IO_MEM_UNASSIGNED) {
2342
        region_offset = start_addr;
2343
    }
2344
    region_offset &= TARGET_PAGE_MASK;
2345
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
2346
    end_addr = start_addr + (target_phys_addr_t)size;
2347
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
2348
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
2349
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
2350
            ram_addr_t orig_memory = p->phys_offset;
2351
            target_phys_addr_t start_addr2, end_addr2;
2352
            int need_subpage = 0;
2353

    
2354
            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
2355
                          need_subpage);
2356
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2357
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
2358
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2359
                                           &p->phys_offset, orig_memory,
2360
                                           p->region_offset);
2361
                } else {
2362
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
2363
                                            >> IO_MEM_SHIFT];
2364
                }
2365
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
2366
                                 region_offset);
2367
                p->region_offset = 0;
2368
            } else {
2369
                p->phys_offset = phys_offset;
2370
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2371
                    (phys_offset & IO_MEM_ROMD))
2372
                    phys_offset += TARGET_PAGE_SIZE;
2373
            }
2374
        } else {
2375
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2376
            p->phys_offset = phys_offset;
2377
            p->region_offset = region_offset;
2378
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
2379
                (phys_offset & IO_MEM_ROMD)) {
2380
                phys_offset += TARGET_PAGE_SIZE;
2381
            } else {
2382
                target_phys_addr_t start_addr2, end_addr2;
2383
                int need_subpage = 0;
2384

    
2385
                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
2386
                              end_addr2, need_subpage);
2387

    
2388
                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
2389
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
2390
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
2391
                                           addr & TARGET_PAGE_MASK);
2392
                    subpage_register(subpage, start_addr2, end_addr2,
2393
                                     phys_offset, region_offset);
2394
                    p->region_offset = 0;
2395
                }
2396
            }
2397
        }
2398
        region_offset += TARGET_PAGE_SIZE;
2399
    }
2400

    
2401
    /* since each CPU stores ram addresses in its TLB cache, we must
2402
       reset the modified entries */
2403
    /* XXX: slow ! */
2404
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
2405
        tlb_flush(env, 1);
2406
    }
2407
}
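
/* Sketch of hypothetical board code, not in this file: the usual
   pattern is to map RAM returned by qemu_ram_alloc() and an MMIO slot
   returned by cpu_register_io_memory() via the plain
   cpu_register_physical_memory() wrapper, which is assumed to forward
   here with region_offset = 0.  Addresses and sizes are invented. */
static void example_map_board(int mmio_index)
{
    ram_addr_t ram_offset = qemu_ram_alloc(0x02000000);     /* 32 MB */

    /* guest-physical RAM at 0x00000000 */
    cpu_register_physical_memory(0x00000000, 0x02000000,
                                 ram_offset | IO_MEM_RAM);
    /* one page of device registers at 0x10000000 */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE, mmio_index);
}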
2408

    
2409
/* XXX: temporary until new memory mapping API */
2410
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
2411
{
2412
    PhysPageDesc *p;
2413

    
2414
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
2415
    if (!p)
2416
        return IO_MEM_UNASSIGNED;
2417
    return p->phys_offset;
2418
}
2419

    
2420
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2421
{
2422
    if (kvm_enabled())
2423
        kvm_coalesce_mmio_region(addr, size);
2424
}
2425

    
2426
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
2427
{
2428
    if (kvm_enabled())
2429
        kvm_uncoalesce_mmio_region(addr, size);
2430
}
2431

    
2432
#ifdef CONFIG_KQEMU
2433
/* XXX: better than nothing */
2434
static ram_addr_t kqemu_ram_alloc(ram_addr_t size)
2435
{
2436
    ram_addr_t addr;
2437
    if ((last_ram_offset + size) > kqemu_phys_ram_size) {
2438
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
2439
                (uint64_t)size, (uint64_t)kqemu_phys_ram_size);
2440
        abort();
2441
    }
2442
    addr = last_ram_offset;
2443
    last_ram_offset = TARGET_PAGE_ALIGN(last_ram_offset + size);
2444
    return addr;
2445
}
2446
#endif
2447

    
2448
ram_addr_t qemu_ram_alloc(ram_addr_t size)
2449
{
2450
    RAMBlock *new_block;
2451

    
2452
#ifdef CONFIG_KQEMU
2453
    if (kqemu_phys_ram_base) {
2454
        return kqemu_ram_alloc(size);
2455
    }
2456
#endif
2457

    
2458
    size = TARGET_PAGE_ALIGN(size);
2459
    new_block = qemu_malloc(sizeof(*new_block));
2460

    
2461
    new_block->host = qemu_vmalloc(size);
2462
    new_block->offset = last_ram_offset;
2463
    new_block->length = size;
2464

    
2465
    new_block->next = ram_blocks;
2466
    ram_blocks = new_block;
2467

    
2468
    phys_ram_dirty = qemu_realloc(phys_ram_dirty,
2469
        (last_ram_offset + size) >> TARGET_PAGE_BITS);
2470
    memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
2471
           0xff, size >> TARGET_PAGE_BITS);
2472

    
2473
    last_ram_offset += size;
2474

    
2475
    return new_block->offset;
2476
}
2477

    
2478
void qemu_ram_free(ram_addr_t addr)
2479
{
2480
    /* TODO: implement this.  */
2481
}
2482

    
2483
/* Return a host pointer to ram allocated with qemu_ram_alloc.
2484
   With the exception of the softmmu code in this file, this should
2485
   only be used for local memory (e.g. video ram) that the device owns,
2486
   and knows it isn't going to access beyond the end of the block.
2487

2488
   It should not be used for general purpose DMA.
2489
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2490
 */
2491
void *qemu_get_ram_ptr(ram_addr_t addr)
2492
{
2493
    RAMBlock *prev;
2494
    RAMBlock **prevp;
2495
    RAMBlock *block;
2496

    
2497
#ifdef CONFIG_KQEMU
2498
    if (kqemu_phys_ram_base) {
2499
        return kqemu_phys_ram_base + addr;
2500
    }
2501
#endif
2502

    
2503
    prev = NULL;
2504
    prevp = &ram_blocks;
2505
    block = ram_blocks;
2506
    while (block && (block->offset > addr
2507
                     || block->offset + block->length <= addr)) {
2508
        if (prev)
2509
          prevp = &prev->next;
2510
        prev = block;
2511
        block = block->next;
2512
    }
2513
    if (!block) {
2514
        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2515
        abort();
2516
    }
2517
    /* Move this entry to the start of the list.  */
2518
    if (prev) {
2519
        prev->next = block->next;
2520
        block->next = *prevp;
2521
        *prevp = block;
2522
    }
2523
    return block->host + (addr - block->offset);
2524
}
2525

    
2526
/* Some of the softmmu routines need to translate from a host pointer
2527
   (typically a TLB entry) back to a ram offset.  */
2528
ram_addr_t qemu_ram_addr_from_host(void *ptr)
2529
{
2530
    RAMBlock *prev;
2531
    RAMBlock **prevp;
2532
    RAMBlock *block;
2533
    uint8_t *host = ptr;
2534

    
2535
#ifdef CONFIG_KQEMU
2536
    if (kqemu_phys_ram_base) {
2537
        return host - kqemu_phys_ram_base;
2538
    }
2539
#endif
2540

    
2541
    prev = NULL;
2542
    prevp = &ram_blocks;
2543
    block = ram_blocks;
2544
    while (block && (block->host > host
2545
                     || block->host + block->length <= host)) {
2546
        if (prev)
2547
          prevp = &prev->next;
2548
        prev = block;
2549
        block = block->next;
2550
    }
2551
    if (!block) {
2552
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
2553
        abort();
2554
    }
2555
    return block->offset + (host - block->host);
2556
}
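
/* Sketch, not in the original file: the two lookups above are inverses
   of each other, which the softmmu dirty-tracking code relies on.  A
   round-trip check looks like this (the helper is invented). */
static void example_ram_roundtrip(ram_addr_t addr)
{
    uint8_t *host = qemu_get_ram_ptr(addr);

    if (qemu_ram_addr_from_host(host) != addr) {
        abort();   /* offset->pointer and pointer->offset must agree */
    }
}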
2557

    
2558
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
2559
{
2560
#ifdef DEBUG_UNASSIGNED
2561
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2562
#endif
2563
#if defined(TARGET_SPARC)
2564
    do_unassigned_access(addr, 0, 0, 0, 1);
2565
#endif
2566
    return 0;
2567
}
2568

    
2569
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
2570
{
2571
#ifdef DEBUG_UNASSIGNED
2572
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2573
#endif
2574
#if defined(TARGET_SPARC)
2575
    do_unassigned_access(addr, 0, 0, 0, 2);
2576
#endif
2577
    return 0;
2578
}
2579

    
2580
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
2581
{
2582
#ifdef DEBUG_UNASSIGNED
2583
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
2584
#endif
2585
#if defined(TARGET_SPARC)
2586
    do_unassigned_access(addr, 0, 0, 0, 4);
2587
#endif
2588
    return 0;
2589
}
2590

    
2591
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
2592
{
2593
#ifdef DEBUG_UNASSIGNED
2594
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2595
#endif
2596
#if defined(TARGET_SPARC)
2597
    do_unassigned_access(addr, 1, 0, 0, 1);
2598
#endif
2599
}
2600

    
2601
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
2602
{
2603
#ifdef DEBUG_UNASSIGNED
2604
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2605
#endif
2606
#if defined(TARGET_SPARC)
2607
    do_unassigned_access(addr, 1, 0, 0, 2);
2608
#endif
2609
}
2610

    
2611
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
2612
{
2613
#ifdef DEBUG_UNASSIGNED
2614
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
2615
#endif
2616
#if defined(TARGET_SPARC)
2617
    do_unassigned_access(addr, 1, 0, 0, 4);
2618
#endif
2619
}
2620

    
2621
static CPUReadMemoryFunc *unassigned_mem_read[3] = {
2622
    unassigned_mem_readb,
2623
    unassigned_mem_readw,
2624
    unassigned_mem_readl,
2625
};
2626

    
2627
static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
2628
    unassigned_mem_writeb,
2629
    unassigned_mem_writew,
2630
    unassigned_mem_writel,
2631
};
2632

    
2633
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
2634
                                uint32_t val)
2635
{
2636
    int dirty_flags;
2637
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2638
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2639
#if !defined(CONFIG_USER_ONLY)
2640
        tb_invalidate_phys_page_fast(ram_addr, 1);
2641
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2642
#endif
2643
    }
2644
    stb_p(qemu_get_ram_ptr(ram_addr), val);
2645
#ifdef CONFIG_KQEMU
2646
    if (cpu_single_env->kqemu_enabled &&
2647
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2648
        kqemu_modify_page(cpu_single_env, ram_addr);
2649
#endif
2650
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2651
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2652
    /* we remove the notdirty callback only if the code has been
2653
       flushed */
2654
    if (dirty_flags == 0xff)
2655
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2656
}
2657

    
2658
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
2659
                                uint32_t val)
2660
{
2661
    int dirty_flags;
2662
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2663
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2664
#if !defined(CONFIG_USER_ONLY)
2665
        tb_invalidate_phys_page_fast(ram_addr, 2);
2666
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2667
#endif
2668
    }
2669
    stw_p(qemu_get_ram_ptr(ram_addr), val);
2670
#ifdef CONFIG_KQEMU
2671
    if (cpu_single_env->kqemu_enabled &&
2672
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2673
        kqemu_modify_page(cpu_single_env, ram_addr);
2674
#endif
2675
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2676
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2677
    /* we remove the notdirty callback only if the code has been
2678
       flushed */
2679
    if (dirty_flags == 0xff)
2680
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2681
}
2682

    
2683
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
2684
                                uint32_t val)
2685
{
2686
    int dirty_flags;
2687
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2688
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2689
#if !defined(CONFIG_USER_ONLY)
2690
        tb_invalidate_phys_page_fast(ram_addr, 4);
2691
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
2692
#endif
2693
    }
2694
    stl_p(qemu_get_ram_ptr(ram_addr), val);
2695
#ifdef CONFIG_KQEMU
2696
    if (cpu_single_env->kqemu_enabled &&
2697
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
2698
        kqemu_modify_page(cpu_single_env, ram_addr);
2699
#endif
2700
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
2701
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
2702
    /* we remove the notdirty callback only if the code has been
2703
       flushed */
2704
    if (dirty_flags == 0xff)
2705
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2706
}
2707

    
2708
static CPUReadMemoryFunc *error_mem_read[3] = {
2709
    NULL, /* never used */
2710
    NULL, /* never used */
2711
    NULL, /* never used */
2712
};
2713

    
2714
static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
2715
    notdirty_mem_writeb,
2716
    notdirty_mem_writew,
2717
    notdirty_mem_writel,
2718
};
2719

    
2720
/* Generate a debug exception if a watchpoint has been hit.  */
2721
static void check_watchpoint(int offset, int len_mask, int flags)
2722
{
2723
    CPUState *env = cpu_single_env;
2724
    target_ulong pc, cs_base;
2725
    TranslationBlock *tb;
2726
    target_ulong vaddr;
2727
    CPUWatchpoint *wp;
2728
    int cpu_flags;
2729

    
2730
    if (env->watchpoint_hit) {
2731
        /* We re-entered the check after replacing the TB. Now raise
2732
         * the debug interrupt so that it will trigger after the
2733
         * current instruction. */
2734
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2735
        return;
2736
    }
2737
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2738
    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
2739
        if ((vaddr == (wp->vaddr & len_mask) ||
2740
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
2741
            wp->flags |= BP_WATCHPOINT_HIT;
2742
            if (!env->watchpoint_hit) {
2743
                env->watchpoint_hit = wp;
2744
                tb = tb_find_pc(env->mem_io_pc);
2745
                if (!tb) {
2746
                    cpu_abort(env, "check_watchpoint: could not find TB for "
2747
                              "pc=%p", (void *)env->mem_io_pc);
2748
                }
2749
                cpu_restore_state(tb, env, env->mem_io_pc, NULL);
2750
                tb_phys_invalidate(tb, -1);
2751
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2752
                    env->exception_index = EXCP_DEBUG;
2753
                } else {
2754
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2755
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
2756
                }
2757
                cpu_resume_from_signal(env, NULL);
2758
            }
2759
        } else {
2760
            wp->flags &= ~BP_WATCHPOINT_HIT;
2761
        }
2762
    }
2763
}
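
/* Sketch of a hypothetical debugger stub, not in this file: the check
   above only runs because a page was mapped with TLB_MMIO while a
   watchpoint covered it; arming one looks like this (the length and
   flags are illustrative). */
static void example_arm_write_watchpoint(CPUState *env, target_ulong addr)
{
    cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, NULL);
}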
2764

    
2765
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
2766
   so these check for a hit then pass through to the normal out-of-line
2767
   phys routines.  */
2768
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
2769
{
2770
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
2771
    return ldub_phys(addr);
2772
}
2773

    
2774
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
2775
{
2776
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
2777
    return lduw_phys(addr);
2778
}
2779

    
2780
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
2781
{
2782
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
2783
    return ldl_phys(addr);
2784
}
2785

    
2786
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
2787
                             uint32_t val)
2788
{
2789
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
2790
    stb_phys(addr, val);
2791
}
2792

    
2793
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
2794
                             uint32_t val)
2795
{
2796
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
2797
    stw_phys(addr, val);
2798
}
2799

    
2800
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
2801
                             uint32_t val)
2802
{
2803
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
2804
    stl_phys(addr, val);
2805
}
2806

    
2807
static CPUReadMemoryFunc *watch_mem_read[3] = {
2808
    watch_mem_readb,
2809
    watch_mem_readw,
2810
    watch_mem_readl,
2811
};
2812

    
2813
static CPUWriteMemoryFunc *watch_mem_write[3] = {
2814
    watch_mem_writeb,
2815
    watch_mem_writew,
2816
    watch_mem_writel,
2817
};
2818

    
2819
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
2820
                                 unsigned int len)
2821
{
2822
    uint32_t ret;
2823
    unsigned int idx;
2824

    
2825
    idx = SUBPAGE_IDX(addr);
2826
#if defined(DEBUG_SUBPAGE)
2827
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
2828
           mmio, len, addr, idx);
2829
#endif
2830
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len],
2831
                                       addr + mmio->region_offset[idx][0][len]);
2832

    
2833
    return ret;
2834
}
2835

    
2836
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
2837
                              uint32_t value, unsigned int len)
2838
{
2839
    unsigned int idx;
2840

    
2841
    idx = SUBPAGE_IDX(addr);
2842
#if defined(DEBUG_SUBPAGE)
2843
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
2844
           mmio, len, addr, idx, value);
2845
#endif
2846
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len],
2847
                                  addr + mmio->region_offset[idx][1][len],
2848
                                  value);
2849
}
2850

    
2851
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
2852
{
2853
#if defined(DEBUG_SUBPAGE)
2854
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2855
#endif
2856

    
2857
    return subpage_readlen(opaque, addr, 0);
2858
}
2859

    
2860
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
2861
                            uint32_t value)
2862
{
2863
#if defined(DEBUG_SUBPAGE)
2864
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2865
#endif
2866
    subpage_writelen(opaque, addr, value, 0);
2867
}
2868

    
2869
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
2870
{
2871
#if defined(DEBUG_SUBPAGE)
2872
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2873
#endif
2874

    
2875
    return subpage_readlen(opaque, addr, 1);
2876
}
2877

    
2878
static void subpage_writew (void *opaque, target_phys_addr_t addr,
2879
                            uint32_t value)
2880
{
2881
#if defined(DEBUG_SUBPAGE)
2882
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2883
#endif
2884
    subpage_writelen(opaque, addr, value, 1);
2885
}
2886

    
2887
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
2888
{
2889
#if defined(DEBUG_SUBPAGE)
2890
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
2891
#endif
2892

    
2893
    return subpage_readlen(opaque, addr, 2);
2894
}
2895

    
2896
static void subpage_writel (void *opaque,
2897
                         target_phys_addr_t addr, uint32_t value)
2898
{
2899
#if defined(DEBUG_SUBPAGE)
2900
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
2901
#endif
2902
    subpage_writelen(opaque, addr, value, 2);
2903
}
2904

    
2905
static CPUReadMemoryFunc *subpage_read[] = {
2906
    &subpage_readb,
2907
    &subpage_readw,
2908
    &subpage_readl,
2909
};
2910

    
2911
static CPUWriteMemoryFunc *subpage_write[] = {
2912
    &subpage_writeb,
2913
    &subpage_writew,
2914
    &subpage_writel,
2915
};
2916

    
2917
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
2918
                             ram_addr_t memory, ram_addr_t region_offset)
2919
{
2920
    int idx, eidx;
2921
    unsigned int i;
2922

    
2923
    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2924
        return -1;
2925
    idx = SUBPAGE_IDX(start);
2926
    eidx = SUBPAGE_IDX(end);
2927
#if defined(DEBUG_SUBPAGE)
2928
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
2929
           mmio, start, end, idx, eidx, memory);
2930
#endif
2931
    memory >>= IO_MEM_SHIFT;
2932
    for (; idx <= eidx; idx++) {
2933
        for (i = 0; i < 4; i++) {
2934
            if (io_mem_read[memory][i]) {
2935
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
2936
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
2937
                mmio->region_offset[idx][0][i] = region_offset;
2938
            }
2939
            if (io_mem_write[memory][i]) {
2940
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
2941
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
2942
                mmio->region_offset[idx][1][i] = region_offset;
2943
            }
2944
        }
2945
    }
2946

    
2947
    return 0;
2948
}
2949

    
2950
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
2951
                           ram_addr_t orig_memory, ram_addr_t region_offset)
2952
{
2953
    subpage_t *mmio;
2954
    int subpage_memory;
2955

    
2956
    mmio = qemu_mallocz(sizeof(subpage_t));
2957

    
2958
    mmio->base = base;
2959
    subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
2960
#if defined(DEBUG_SUBPAGE)
2961
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
2962
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
2963
#endif
2964
    *phys = subpage_memory | IO_MEM_SUBPAGE;
2965
    subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory,
2966
                         region_offset);
2967

    
2968
    return mmio;
2969
}
2970

    
2971
static int get_free_io_mem_idx(void)
2972
{
2973
    int i;
2974

    
2975
    for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
2976
        if (!io_mem_used[i]) {
2977
            io_mem_used[i] = 1;
2978
            return i;
2979
        }
2980

    
2981
    return -1;
2982
}
2983

    
2984
static void io_mem_init(void)
2985
{
2986
    int i;
2987

    
2988
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
2989
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
2990
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
2991
    for (i=0; i<5; i++)
2992
        io_mem_used[i] = 1;
2993

    
2994
    io_mem_watch = cpu_register_io_memory(0, watch_mem_read,
2995
                                          watch_mem_write, NULL);
2996
#ifdef CONFIG_KQEMU
2997
    if (kqemu_phys_ram_base) {
2998
        /* alloc dirty bits array */
2999
        phys_ram_dirty = qemu_vmalloc(kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3000
        memset(phys_ram_dirty, 0xff, kqemu_phys_ram_size >> TARGET_PAGE_BITS);
3001
    }
3002
#endif
3003
}
3004

    
3005
/* mem_read and mem_write are arrays of functions containing the
3006
   function to access byte (index 0), word (index 1) and dword (index
3007
   2). Functions can be omitted with a NULL function pointer. The
3008
   registered functions may be modified dynamically later.
3009
   If io_index is non zero, the corresponding io zone is
3010
   modified. If it is zero, a new io zone is allocated. The return
3011
   value can be used with cpu_register_physical_memory(). (-1) is
3012
   returned on error. */
3013
int cpu_register_io_memory(int io_index,
3014
                           CPUReadMemoryFunc **mem_read,
3015
                           CPUWriteMemoryFunc **mem_write,
3016
                           void *opaque)
3017
{
3018
    int i, subwidth = 0;
3019

    
3020
    if (io_index <= 0) {
3021
        io_index = get_free_io_mem_idx();
3022
        if (io_index == -1)
3023
            return io_index;
3024
    } else {
3025
        if (io_index >= IO_MEM_NB_ENTRIES)
3026
            return -1;
3027
    }
3028

    
3029
    for(i = 0;i < 3; i++) {
3030
        if (!mem_read[i] || !mem_write[i])
3031
            subwidth = IO_MEM_SUBWIDTH;
3032
        io_mem_read[io_index][i] = mem_read[i];
3033
        io_mem_write[io_index][i] = mem_write[i];
3034
    }
3035
    io_mem_opaque[io_index] = opaque;
3036
    return (io_index << IO_MEM_SHIFT) | subwidth;
3037
}
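
/* Sketch of a hypothetical device, not in this file: the usual pattern
   for the dispatch tables managed above.  Only the 32-bit handlers are
   provided, so the returned value carries IO_MEM_SUBWIDTH (see the loop
   above).  All names are invented for illustration. */
static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    return 0;                              /* device register read */
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    /* update device state here */
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static int example_register_device(void *dev_state)
{
    /* io_index 0 asks for a fresh slot from get_free_io_mem_idx() */
    return cpu_register_io_memory(0, example_dev_read, example_dev_write,
                                  dev_state);
}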
3038

    
3039
void cpu_unregister_io_memory(int io_table_address)
3040
{
3041
    int i;
3042
    int io_index = io_table_address >> IO_MEM_SHIFT;
3043

    
3044
    for (i=0;i < 3; i++) {
3045
        io_mem_read[io_index][i] = unassigned_mem_read[i];
3046
        io_mem_write[io_index][i] = unassigned_mem_write[i];
3047
    }
3048
    io_mem_opaque[io_index] = NULL;
3049
    io_mem_used[io_index] = 0;
3050
}
3051

    
3052
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
3053
{
3054
    return io_mem_write[io_index >> IO_MEM_SHIFT];
3055
}
3056

    
3057
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
3058
{
3059
    return io_mem_read[io_index >> IO_MEM_SHIFT];
3060
}
3061

    
3062
#endif /* !defined(CONFIG_USER_ONLY) */
3063

    
3064
/* physical memory access (slow version, mainly for debug) */
3065
#if defined(CONFIG_USER_ONLY)
3066
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
3067
                            int len, int is_write)
3068
{
3069
    int l, flags;
3070
    target_ulong page;
3071
    void * p;
3072

    
3073
    while (len > 0) {
3074
        page = addr & TARGET_PAGE_MASK;
3075
        l = (page + TARGET_PAGE_SIZE) - addr;
3076
        if (l > len)
3077
            l = len;
3078
        flags = page_get_flags(page);
3079
        if (!(flags & PAGE_VALID))
3080
            return;
3081
        if (is_write) {
3082
            if (!(flags & PAGE_WRITE))
3083
                return;
3084
            /* XXX: this code should not depend on lock_user */
3085
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
3086
                /* FIXME - should this return an error rather than just fail? */
3087
                return;
3088
            memcpy(p, buf, l);
3089
            unlock_user(p, addr, l);
3090
        } else {
3091
            if (!(flags & PAGE_READ))
3092
                return;
3093
            /* XXX: this code should not depend on lock_user */
3094
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
3095
                /* FIXME - should this return an error rather than just fail? */
3096
                return;
3097
            memcpy(buf, p, l);
3098
            unlock_user(p, addr, 0);
3099
        }
3100
        len -= l;
3101
        buf += l;
3102
        addr += l;
3103
    }
3104
}
3105

    
3106
#else
3107
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

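/* Example (illustrative, not part of the original file): callers normally go
 * through the cpu_physical_memory_read()/cpu_physical_memory_write() wrappers
 * around the routine above, e.g. a device model copying a descriptor at a
 * hypothetical guest physical address desc_gpa:
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_gpa, desc, sizeof(desc));
 *     // ... modify desc ...
 *     cpu_physical_memory_write(desc_gpa, desc, sizeof(desc));
 */
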
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

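/* Example (illustrative): a machine init or firmware loader can place an image
 * into a ROM-backed region, which a plain cpu_physical_memory_write() would
 * skip; bios_base, bios_data and bios_size are hypothetical names:
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
 */
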
typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    LIST_ENTRY(MapClient) link;
} MapClient;

static LIST_HEAD(map_client_list, MapClient) map_client_list
    = LIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = qemu_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    LIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    LIST_REMOVE(client, link);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!LIST_EMPTY(&map_client_list)) {
        client = LIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        LIST_REMOVE(client, link);
    }
}

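/* Example (illustrative): a caller whose cpu_physical_memory_map() attempt
 * failed because the single bounce buffer was busy can ask to be notified when
 * it is released; retry_map and s are hypothetical:
 *
 *     void *handle = cpu_register_map_client(s, retry_map);
 *     // ...
 *     cpu_unregister_map_client(handle);   // if the retry is no longer needed
 *
 * Registered callbacks are invoked (and removed) by cpu_notify_map_clients()
 * when cpu_physical_memory_unmap() frees the bounce buffer.
 */
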
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t done = 0;
    int l;
    uint8_t *ret = NULL;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    unsigned long addr1;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (done || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_rw(addr, bounce.buffer, l, 0);
            }
            ptr = bounce.buffer;
        } else {
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            ptr = qemu_get_ram_ptr(addr1);
        }
        if (!done) {
            ret = ptr;
        } else if (ret + done != ptr) {
            break;
        }

        len -= l;
        addr += l;
        done += l;
    }
    *plen = done;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
                addr1 += l;
                access_len -= l;
            }
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_free(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

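/* Example (illustrative) of the map/unmap pair for a zero-copy DMA write into
 * guest memory; gpa, data and size are hypothetical:
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(gpa, &plen, 1);
 *     if (host) {
 *         memcpy(host, data, plen);                  // plen may be < size
 *         cpu_physical_memory_unmap(host, plen, 1, plen);
 *     } else {
 *         // bounce buffer busy: register a map client and retry later
 *     }
 */
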
/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

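/* Example (illustrative): the ld*_phys helpers above are what target MMU code
 * typically uses to walk guest page tables, e.g. fetching a 32-bit PTE at a
 * hypothetical pte_addr:
 *
 *     uint32_t pte = ldl_phys(pte_addr);
 */
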
/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                    (0xff & ~CODE_DIRTY_FLAG);
            }
        }
    }
}

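/* Example (illustrative): per the comment above, MMU emulation can update
 * accessed/dirty bits in a guest PTE without making the page look dirty to the
 * code-invalidation machinery; pte and pte_addr are hypothetical:
 *
 *     pte |= PG_ACCESSED_MASK;          // target-specific flag (x86 here)
 *     stl_phys_notdirty(pte_addr, pte);
 */
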
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

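/* The byte/word/quad store helpers below simply funnel through
 * cpu_physical_memory_write(); stw_phys() and stq_phys() use
 * tswap16()/tswap64() so the value is written in guest (target) byte order.
 */
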
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
#if !defined(CONFIG_USER_ONLY)
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
#endif
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

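/* Example (illustrative): the gdb stub reads and writes guest memory through
 * the helper above, which also lets breakpoint patching reach ROM; env, addr,
 * mem_buf and len come from the debugger request:
 *
 *     if (cpu_memory_rw_debug(env, addr, mem_buf, len, 0) != 0)
 *         // report an access error back to gdb
 */
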
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
    /* Calculate how many instructions had been executed before the fault
       occurred.  */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn.  */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch.  */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
            && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen.  */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok.  */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB.  */
    cpu_resume_from_signal(env, NULL);
}

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

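/* Note (illustrative): these statistics are what the monitor's "info jit"
 * command prints; the monitor passes its own fprintf-like callback as
 * cpu_fprintf, roughly:
 *
 *     dump_exec_info((FILE *)mon, monitor_fprintf);
 */
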
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif