root / exec.c @ 0776590d

1
/*
2
 *  virtual page mapping and translated block handling
3
 *
4
 *  Copyright (c) 2003 Fabrice Bellard
5
 *
6
 * This library is free software; you can redistribute it and/or
7
 * modify it under the terms of the GNU Lesser General Public
8
 * License as published by the Free Software Foundation; either
9
 * version 2 of the License, or (at your option) any later version.
10
 *
11
 * This library is distributed in the hope that it will be useful,
12
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
 * Lesser General Public License for more details.
15
 *
16
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, write to the Free Software
18
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
19
 */
20
#include "config.h"
21
#ifdef _WIN32
22
#define WIN32_LEAN_AND_MEAN
23
#include <windows.h>
24
#else
25
#include <sys/types.h>
26
#include <sys/mman.h>
27
#endif
28
#include <stdlib.h>
29
#include <stdio.h>
30
#include <stdarg.h>
31
#include <string.h>
32
#include <errno.h>
33
#include <unistd.h>
34
#include <inttypes.h>
35

    
36
#include "cpu.h"
37
#include "exec-all.h"
38
#include "qemu-common.h"
39
#include "tcg.h"
40
#if defined(CONFIG_USER_ONLY)
41
#include <qemu.h>
42
#endif
43

    
44
//#define DEBUG_TB_INVALIDATE
45
//#define DEBUG_FLUSH
46
//#define DEBUG_TLB
47
//#define DEBUG_UNASSIGNED
48

    
49
/* make various TB consistency checks */
50
//#define DEBUG_TB_CHECK
51
//#define DEBUG_TLB_CHECK
52

    
53
//#define DEBUG_IOPORT
54
//#define DEBUG_SUBPAGE
55

    
56
#if !defined(CONFIG_USER_ONLY)
57
/* TB consistency checks only implemented for usermode emulation.  */
58
#undef DEBUG_TB_CHECK
59
#endif
60

    
61
#define SMC_BITMAP_USE_THRESHOLD 10
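/* number of write invalidations to a page after which a bitmap of the bytes
   covered by translated code is built (see build_page_bitmap), so that later
   writes that do not touch code can be ignored */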
62

    
63
#define MMAP_AREA_START        0x00000000
64
#define MMAP_AREA_END          0xa8000000
65

    
66
#if defined(TARGET_SPARC64)
67
#define TARGET_PHYS_ADDR_SPACE_BITS 41
68
#elif defined(TARGET_SPARC)
69
#define TARGET_PHYS_ADDR_SPACE_BITS 36
70
#elif defined(TARGET_ALPHA)
71
#define TARGET_PHYS_ADDR_SPACE_BITS 42
72
#define TARGET_VIRT_ADDR_SPACE_BITS 42
73
#elif defined(TARGET_PPC64)
74
#define TARGET_PHYS_ADDR_SPACE_BITS 42
75
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
76
#define TARGET_PHYS_ADDR_SPACE_BITS 42
77
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
78
#define TARGET_PHYS_ADDR_SPACE_BITS 36
79
#else
80
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
81
#define TARGET_PHYS_ADDR_SPACE_BITS 32
82
#endif
83

    
84
TranslationBlock *tbs;
85
int code_gen_max_blocks;
86
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
87
int nb_tbs;
88
/* any access to the tbs or the page table must use this lock */
89
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
90

    
91
uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
92
uint8_t *code_gen_buffer;
93
unsigned long code_gen_buffer_size;
94
/* threshold to flush the translated code buffer */
95
unsigned long code_gen_buffer_max_size; 
96
uint8_t *code_gen_ptr;
97

    
98
ram_addr_t phys_ram_size;
99
int phys_ram_fd;
100
uint8_t *phys_ram_base;
101
uint8_t *phys_ram_dirty;
102
static ram_addr_t phys_ram_alloc_offset = 0;
103

    
104
CPUState *first_cpu;
105
/* current CPU in the current thread. It is only valid inside
106
   cpu_exec() */
107
CPUState *cpu_single_env;
108

    
109
typedef struct PageDesc {
110
    /* list of TBs intersecting this ram page */
111
    TranslationBlock *first_tb;
112
    /* in order to optimize self-modifying code handling, we count the
113
       number of code write invalidations to a given page and switch to a bitmap */
114
    unsigned int code_write_count;
115
    uint8_t *code_bitmap;
116
#if defined(CONFIG_USER_ONLY)
117
    unsigned long flags;
118
#endif
119
} PageDesc;
120

    
121
typedef struct PhysPageDesc {
122
    /* offset in host memory of the page + io_index in the low 12 bits */
123
    ram_addr_t phys_offset;
124
} PhysPageDesc;
125

    
126
#define L2_BITS 10
127
#if defined(CONFIG_USER_ONLY) && defined(TARGET_VIRT_ADDR_SPACE_BITS)
128
/* XXX: this is a temporary hack for alpha target.
129
 *      In the future, this is to be replaced by a multi-level table
130
 *      to actually be able to handle the complete 64 bits address space.
131
 */
132
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
133
#else
134
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
135
#endif
136

    
137
#define L1_SIZE (1 << L1_BITS)
138
#define L2_SIZE (1 << L2_BITS)
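/* two-level page table: the target page index is split into a high L1 part
   and a low L2 part, i.e. desc = l1_map[index >> L2_BITS][index & (L2_SIZE - 1)] */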
139

    
140
static void io_mem_init(void);
141

    
142
unsigned long qemu_real_host_page_size;
143
unsigned long qemu_host_page_bits;
144
unsigned long qemu_host_page_size;
145
unsigned long qemu_host_page_mask;
146

    
147
/* XXX: for system emulation, it could just be an array */
148
static PageDesc *l1_map[L1_SIZE];
149
PhysPageDesc **l1_phys_map;
150

    
151
/* io memory support */
152
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
153
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
154
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
155
static int io_mem_nb;
156
#if defined(CONFIG_SOFTMMU)
157
static int io_mem_watch;
158
#endif
159

    
160
/* log support */
161
char *logfilename = "/tmp/qemu.log";
162
FILE *logfile;
163
int loglevel;
164
static int log_append = 0;
165

    
166
/* statistics */
167
static int tlb_flush_count;
168
static int tb_flush_count;
169
static int tb_phys_invalidate_count;
170

    
171
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
172
typedef struct subpage_t {
173
    target_phys_addr_t base;
174
    CPUReadMemoryFunc **mem_read[TARGET_PAGE_SIZE][4];
175
    CPUWriteMemoryFunc **mem_write[TARGET_PAGE_SIZE][4];
176
    void *opaque[TARGET_PAGE_SIZE][2][4];
177
} subpage_t;
178

    
179
#ifdef _WIN32
180
static void map_exec(void *addr, long size)
181
{
182
    DWORD old_protect;
183
    VirtualProtect(addr, size,
184
                   PAGE_EXECUTE_READWRITE, &old_protect);
185
    
186
}
187
#else
188
static void map_exec(void *addr, long size)
189
{
190
    unsigned long start, end, page_size;
191
    
192
    page_size = getpagesize();
193
    start = (unsigned long)addr;
194
    start &= ~(page_size - 1);
195
    
196
    end = (unsigned long)addr + size;
197
    end += page_size - 1;
198
    end &= ~(page_size - 1);
199
    
200
    mprotect((void *)start, end - start,
201
             PROT_READ | PROT_WRITE | PROT_EXEC);
202
}
203
#endif
204

    
205
static void page_init(void)
206
{
207
    /* NOTE: we can always suppose that qemu_host_page_size >=
208
       TARGET_PAGE_SIZE */
209
#ifdef _WIN32
210
    {
211
        SYSTEM_INFO system_info;
212
        DWORD old_protect;
213

    
214
        GetSystemInfo(&system_info);
215
        qemu_real_host_page_size = system_info.dwPageSize;
216
    }
217
#else
218
    qemu_real_host_page_size = getpagesize();
219
#endif
220
    if (qemu_host_page_size == 0)
221
        qemu_host_page_size = qemu_real_host_page_size;
222
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
223
        qemu_host_page_size = TARGET_PAGE_SIZE;
224
    qemu_host_page_bits = 0;
225
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
226
        qemu_host_page_bits++;
227
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
228
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
229
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
230

    
231
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
232
    {
233
        long long startaddr, endaddr;
234
        FILE *f;
235
        int n;
236

    
237
        last_brk = (unsigned long)sbrk(0);
238
        f = fopen("/proc/self/maps", "r");
239
        if (f) {
240
            do {
241
                n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
242
                if (n == 2) {
243
                    startaddr = MIN(startaddr,
244
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
245
                    endaddr = MIN(endaddr,
246
                                    (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
247
                    page_set_flags(startaddr & TARGET_PAGE_MASK,
248
                                   TARGET_PAGE_ALIGN(endaddr),
249
                                   PAGE_RESERVED); 
250
                }
251
            } while (!feof(f));
252
            fclose(f);
253
        }
254
    }
255
#endif
256
}
257

    
258
static inline PageDesc *page_find_alloc(target_ulong index)
259
{
260
    PageDesc **lp, *p;
261

    
262
    lp = &l1_map[index >> L2_BITS];
263
    p = *lp;
264
    if (!p) {
265
        /* allocate if not found */
266
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
267
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
268
        *lp = p;
269
    }
270
    return p + (index & (L2_SIZE - 1));
271
}
272

    
273
static inline PageDesc *page_find(target_ulong index)
274
{
275
    PageDesc *p;
276

    
277
    p = l1_map[index >> L2_BITS];
278
    if (!p)
279
        return 0;
280
    return p + (index & (L2_SIZE - 1));
281
}
282

    
283
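/* same two-level scheme for the physical address space; when it is wider
   than 32 bits an extra indirection level is used, and missing entries are
   only allocated when 'alloc' is non zero */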
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
284
{
285
    void **lp, **p;
286
    PhysPageDesc *pd;
287

    
288
    p = (void **)l1_phys_map;
289
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
290

    
291
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
292
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
293
#endif
294
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
295
    p = *lp;
296
    if (!p) {
297
        /* allocate if not found */
298
        if (!alloc)
299
            return NULL;
300
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
301
        memset(p, 0, sizeof(void *) * L1_SIZE);
302
        *lp = p;
303
    }
304
#endif
305
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
306
    pd = *lp;
307
    if (!pd) {
308
        int i;
309
        /* allocate if not found */
310
        if (!alloc)
311
            return NULL;
312
        pd = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
313
        *lp = pd;
314
        for (i = 0; i < L2_SIZE; i++)
315
          pd[i].phys_offset = IO_MEM_UNASSIGNED;
316
    }
317
    return ((PhysPageDesc *)pd) + (index & (L2_SIZE - 1));
318
}
319

    
320
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
321
{
322
    return phys_page_find_alloc(index, 0);
323
}
324

    
325
#if !defined(CONFIG_USER_ONLY)
326
static void tlb_protect_code(ram_addr_t ram_addr);
327
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
328
                                    target_ulong vaddr);
329
#endif
330

    
331
#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
332

    
333
#if defined(CONFIG_USER_ONLY)
334
/* Currently it is not recommended to allocate big chunks of data in
335
   user mode. This will change once a dedicated libc is used */
336
#define USE_STATIC_CODE_GEN_BUFFER
337
#endif
338

    
339
#ifdef USE_STATIC_CODE_GEN_BUFFER
340
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
341
#endif
342

    
343
void code_gen_alloc(unsigned long tb_size)
344
{
345
#ifdef USE_STATIC_CODE_GEN_BUFFER
346
    code_gen_buffer = static_code_gen_buffer;
347
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
348
    map_exec(code_gen_buffer, code_gen_buffer_size);
349
#else
350
    code_gen_buffer_size = tb_size;
351
    if (code_gen_buffer_size == 0) {
352
#if defined(CONFIG_USER_ONLY)
353
        /* in user mode, phys_ram_size is not meaningful */
354
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
355
#else
356
        /* XXX: needs adjustments */
357
        code_gen_buffer_size = (int)(phys_ram_size / 4);
358
#endif
359
    }
360
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
361
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
362
    /* The code gen buffer location may have constraints depending on
363
       the host cpu and OS */
364
#if defined(__linux__) 
365
    {
366
        int flags;
367
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
368
#if defined(__x86_64__)
369
        flags |= MAP_32BIT;
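        /* presumably keeps the buffer in the low 2GB so that generated code
           and the prologue can reach each other with 32 bit offsets */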
370
        /* Cannot map more than that */
371
        if (code_gen_buffer_size > (800 * 1024 * 1024))
372
            code_gen_buffer_size = (800 * 1024 * 1024);
373
#endif
374
        code_gen_buffer = mmap(NULL, code_gen_buffer_size,
375
                               PROT_WRITE | PROT_READ | PROT_EXEC, 
376
                               flags, -1, 0);
377
        if (code_gen_buffer == MAP_FAILED) {
378
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
379
            exit(1);
380
        }
381
    }
382
#else
383
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
384
    if (!code_gen_buffer) {
385
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
386
        exit(1);
387
    }
388
    map_exec(code_gen_buffer, code_gen_buffer_size);
389
#endif
390
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
391
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
392
    code_gen_buffer_max_size = code_gen_buffer_size - 
393
        code_gen_max_block_size();
394
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
395
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
396
}
397

    
398
/* Must be called before using the QEMU cpus. 'tb_size' is the size
399
   (in bytes) allocated to the translation buffer. Zero means default
400
   size. */
401
void cpu_exec_init_all(unsigned long tb_size)
402
{
403
    cpu_gen_init();
404
    code_gen_alloc(tb_size);
405
    code_gen_ptr = code_gen_buffer;
406
    page_init();
407
    io_mem_init();
408
}
409

    
410
void cpu_exec_init(CPUState *env)
411
{
412
    CPUState **penv;
413
    int cpu_index;
414

    
415
    env->next_cpu = NULL;
416
    penv = &first_cpu;
417
    cpu_index = 0;
418
    while (*penv != NULL) {
419
        penv = (CPUState **)&(*penv)->next_cpu;
420
        cpu_index++;
421
    }
422
    env->cpu_index = cpu_index;
423
    env->nb_watchpoints = 0;
424
    *penv = env;
425
}
426

    
427
static inline void invalidate_page_bitmap(PageDesc *p)
428
{
429
    if (p->code_bitmap) {
430
        qemu_free(p->code_bitmap);
431
        p->code_bitmap = NULL;
432
    }
433
    p->code_write_count = 0;
434
}
435

    
436
/* set to NULL all the 'first_tb' fields in all PageDescs */
437
static void page_flush_tb(void)
438
{
439
    int i, j;
440
    PageDesc *p;
441

    
442
    for(i = 0; i < L1_SIZE; i++) {
443
        p = l1_map[i];
444
        if (p) {
445
            for(j = 0; j < L2_SIZE; j++) {
446
                p->first_tb = NULL;
447
                invalidate_page_bitmap(p);
448
                p++;
449
            }
450
        }
451
    }
452
}
453

    
454
/* flush all the translation blocks */
455
/* XXX: tb_flush is currently not thread safe */
456
void tb_flush(CPUState *env1)
457
{
458
    CPUState *env;
459
#if defined(DEBUG_FLUSH)
460
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
461
           (unsigned long)(code_gen_ptr - code_gen_buffer),
462
           nb_tbs, nb_tbs > 0 ?
463
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
464
#endif
465
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
466
        cpu_abort(env1, "Internal error: code buffer overflow\n");
467

    
468
    nb_tbs = 0;
469

    
470
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
471
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
472
    }
473

    
474
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
475
    page_flush_tb();
476

    
477
    code_gen_ptr = code_gen_buffer;
478
    /* XXX: flush processor icache at this point if cache flush is
479
       expensive */
480
    tb_flush_count++;
481
}
482

    
483
#ifdef DEBUG_TB_CHECK
484

    
485
static void tb_invalidate_check(target_ulong address)
486
{
487
    TranslationBlock *tb;
488
    int i;
489
    address &= TARGET_PAGE_MASK;
490
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
491
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
492
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
493
                  address >= tb->pc + tb->size)) {
494
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
495
                       address, (long)tb->pc, tb->size);
496
            }
497
        }
498
    }
499
}
500

    
501
/* verify that all the pages have correct rights for code */
502
static void tb_page_check(void)
503
{
504
    TranslationBlock *tb;
505
    int i, flags1, flags2;
506

    
507
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
508
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
509
            flags1 = page_get_flags(tb->pc);
510
            flags2 = page_get_flags(tb->pc + tb->size - 1);
511
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
512
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
513
                       (long)tb->pc, tb->size, flags1, flags2);
514
            }
515
        }
516
    }
517
}
518

    
519
void tb_jmp_check(TranslationBlock *tb)
520
{
521
    TranslationBlock *tb1;
522
    unsigned int n1;
523

    
524
    /* walk the circular jump list and check that it terminates at this TB */
525
    tb1 = tb->jmp_first;
526
    for(;;) {
527
        n1 = (long)tb1 & 3;
528
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
529
        if (n1 == 2)
530
            break;
531
        tb1 = tb1->jmp_next[n1];
532
    }
533
    /* check end of list */
534
    if (tb1 != tb) {
535
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
536
    }
537
}
538

    
539
#endif
540

    
541
/* invalidate one TB */
542
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
543
                             int next_offset)
544
{
545
    TranslationBlock *tb1;
546
    for(;;) {
547
        tb1 = *ptb;
548
        if (tb1 == tb) {
549
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
550
            break;
551
        }
552
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
553
    }
554
}
555

    
556
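/* pointers stored in the per-page TB lists and in the jump lists carry a tag
   in their low 2 bits: 0 or 1 selects which of the TB's (at most two) pages or
   jump slots the link belongs to, and 2 marks the head of a circular jump list
   (tb->jmp_first points back to the TB itself with tag 2) */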
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
557
{
558
    TranslationBlock *tb1;
559
    unsigned int n1;
560

    
561
    for(;;) {
562
        tb1 = *ptb;
563
        n1 = (long)tb1 & 3;
564
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
565
        if (tb1 == tb) {
566
            *ptb = tb1->page_next[n1];
567
            break;
568
        }
569
        ptb = &tb1->page_next[n1];
570
    }
571
}
572

    
573
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
574
{
575
    TranslationBlock *tb1, **ptb;
576
    unsigned int n1;
577

    
578
    ptb = &tb->jmp_next[n];
579
    tb1 = *ptb;
580
    if (tb1) {
581
        /* find tb(n) in circular list */
582
        for(;;) {
583
            tb1 = *ptb;
584
            n1 = (long)tb1 & 3;
585
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
586
            if (n1 == n && tb1 == tb)
587
                break;
588
            if (n1 == 2) {
589
                ptb = &tb1->jmp_first;
590
            } else {
591
                ptb = &tb1->jmp_next[n1];
592
            }
593
        }
594
        /* now we can suppress tb(n) from the list */
595
        *ptb = tb->jmp_next[n];
596

    
597
        tb->jmp_next[n] = NULL;
598
    }
599
}
600

    
601
/* reset the jump entry 'n' of a TB so that it is not chained to
602
   another TB */
603
static inline void tb_reset_jump(TranslationBlock *tb, int n)
604
{
605
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
606
}
607

    
608
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
609
{
610
    CPUState *env;
611
    PageDesc *p;
612
    unsigned int h, n1;
613
    target_phys_addr_t phys_pc;
614
    TranslationBlock *tb1, *tb2;
615

    
616
    /* remove the TB from the hash list */
617
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
618
    h = tb_phys_hash_func(phys_pc);
619
    tb_remove(&tb_phys_hash[h], tb,
620
              offsetof(TranslationBlock, phys_hash_next));
621

    
622
    /* remove the TB from the page list */
623
    if (tb->page_addr[0] != page_addr) {
624
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
625
        tb_page_remove(&p->first_tb, tb);
626
        invalidate_page_bitmap(p);
627
    }
628
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
629
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
630
        tb_page_remove(&p->first_tb, tb);
631
        invalidate_page_bitmap(p);
632
    }
633

    
634
    tb_invalidated_flag = 1;
635

    
636
    /* remove the TB from the hash list */
637
    h = tb_jmp_cache_hash_func(tb->pc);
638
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
639
        if (env->tb_jmp_cache[h] == tb)
640
            env->tb_jmp_cache[h] = NULL;
641
    }
642

    
643
    /* suppress this TB from the two jump lists */
644
    tb_jmp_remove(tb, 0);
645
    tb_jmp_remove(tb, 1);
646

    
647
    /* suppress any remaining jumps to this TB */
648
    tb1 = tb->jmp_first;
649
    for(;;) {
650
        n1 = (long)tb1 & 3;
651
        if (n1 == 2)
652
            break;
653
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
654
        tb2 = tb1->jmp_next[n1];
655
        tb_reset_jump(tb1, n1);
656
        tb1->jmp_next[n1] = NULL;
657
        tb1 = tb2;
658
    }
659
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
660

    
661
    tb_phys_invalidate_count++;
662
}
663

    
664
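/* set bits [start, start + len) in the byte-array bitmap 'tab',
   e.g. set_bits(tab, 3, 6) sets bits 3..8 */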
static inline void set_bits(uint8_t *tab, int start, int len)
665
{
666
    int end, mask, end1;
667

    
668
    end = start + len;
669
    tab += start >> 3;
670
    mask = 0xff << (start & 7);
671
    if ((start & ~7) == (end & ~7)) {
672
        if (start < end) {
673
            mask &= ~(0xff << (end & 7));
674
            *tab |= mask;
675
        }
676
    } else {
677
        *tab++ |= mask;
678
        start = (start + 8) & ~7;
679
        end1 = end & ~7;
680
        while (start < end1) {
681
            *tab++ = 0xff;
682
            start += 8;
683
        }
684
        if (start < end) {
685
            mask = ~(0xff << (end & 7));
686
            *tab |= mask;
687
        }
688
    }
689
}
690

    
691
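/* build a bitmap with one bit per byte of the page that is covered by
   translated code; tb_invalidate_phys_page_fast uses it to skip writes that
   do not touch any TB */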
static void build_page_bitmap(PageDesc *p)
692
{
693
    int n, tb_start, tb_end;
694
    TranslationBlock *tb;
695

    
696
    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
697
    if (!p->code_bitmap)
698
        return;
699
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);
700

    
701
    tb = p->first_tb;
702
    while (tb != NULL) {
703
        n = (long)tb & 3;
704
        tb = (TranslationBlock *)((long)tb & ~3);
705
        /* NOTE: this is subtle as a TB may span two physical pages */
706
        if (n == 0) {
707
            /* NOTE: tb_end may be after the end of the page, but
708
               it is not a problem */
709
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
710
            tb_end = tb_start + tb->size;
711
            if (tb_end > TARGET_PAGE_SIZE)
712
                tb_end = TARGET_PAGE_SIZE;
713
        } else {
714
            tb_start = 0;
715
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
716
        }
717
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
718
        tb = tb->page_next[n];
719
    }
720
}
721

    
722
#ifdef TARGET_HAS_PRECISE_SMC
723

    
724
static void tb_gen_code(CPUState *env,
725
                        target_ulong pc, target_ulong cs_base, int flags,
726
                        int cflags)
727
{
728
    TranslationBlock *tb;
729
    uint8_t *tc_ptr;
730
    target_ulong phys_pc, phys_page2, virt_page2;
731
    int code_gen_size;
732

    
733
    phys_pc = get_phys_addr_code(env, pc);
734
    tb = tb_alloc(pc);
735
    if (!tb) {
736
        /* flush must be done */
737
        tb_flush(env);
738
        /* cannot fail at this point */
739
        tb = tb_alloc(pc);
740
    }
741
    tc_ptr = code_gen_ptr;
742
    tb->tc_ptr = tc_ptr;
743
    tb->cs_base = cs_base;
744
    tb->flags = flags;
745
    tb->cflags = cflags;
746
    cpu_gen_code(env, tb, &code_gen_size);
747
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
748

    
749
    /* check next page if needed */
750
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
751
    phys_page2 = -1;
752
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
753
        phys_page2 = get_phys_addr_code(env, virt_page2);
754
    }
755
    tb_link_phys(tb, phys_pc, phys_page2);
756
}
757
#endif
758

    
759
/* invalidate all TBs which intersect with the target physical page
760
   starting in the range [start, end). NOTE: start and end must refer to
761
   the same physical page. 'is_cpu_write_access' should be true if called
762
   from a real cpu write access: the virtual CPU will exit the current
763
   TB if code is modified inside this TB. */
764
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
765
                                   int is_cpu_write_access)
766
{
767
    int n, current_tb_modified, current_tb_not_found, current_flags;
768
    CPUState *env = cpu_single_env;
769
    PageDesc *p;
770
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
771
    target_ulong tb_start, tb_end;
772
    target_ulong current_pc, current_cs_base;
773

    
774
    p = page_find(start >> TARGET_PAGE_BITS);
775
    if (!p)
776
        return;
777
    if (!p->code_bitmap &&
778
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
779
        is_cpu_write_access) {
780
        /* build code bitmap */
781
        build_page_bitmap(p);
782
    }
783

    
784
    /* we remove all the TBs in the range [start, end) */
785
    /* XXX: see if in some cases it could be faster to invalidate all the code */
786
    current_tb_not_found = is_cpu_write_access;
787
    current_tb_modified = 0;
788
    current_tb = NULL; /* avoid warning */
789
    current_pc = 0; /* avoid warning */
790
    current_cs_base = 0; /* avoid warning */
791
    current_flags = 0; /* avoid warning */
792
    tb = p->first_tb;
793
    while (tb != NULL) {
794
        n = (long)tb & 3;
795
        tb = (TranslationBlock *)((long)tb & ~3);
796
        tb_next = tb->page_next[n];
797
        /* NOTE: this is subtle as a TB may span two physical pages */
798
        if (n == 0) {
799
            /* NOTE: tb_end may be after the end of the page, but
800
               it is not a problem */
801
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
802
            tb_end = tb_start + tb->size;
803
        } else {
804
            tb_start = tb->page_addr[1];
805
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
806
        }
807
        if (!(tb_end <= start || tb_start >= end)) {
808
#ifdef TARGET_HAS_PRECISE_SMC
809
            if (current_tb_not_found) {
810
                current_tb_not_found = 0;
811
                current_tb = NULL;
812
                if (env->mem_write_pc) {
813
                    /* now we have a real cpu fault */
814
                    current_tb = tb_find_pc(env->mem_write_pc);
815
                }
816
            }
817
            if (current_tb == tb &&
818
                !(current_tb->cflags & CF_SINGLE_INSN)) {
819
                /* If we are modifying the current TB, we must stop
820
                its execution. We could be more precise by checking
821
                that the modification is after the current PC, but it
822
                would require a specialized function to partially
823
                restore the CPU state */
824

    
825
                current_tb_modified = 1;
826
                cpu_restore_state(current_tb, env,
827
                                  env->mem_write_pc, NULL);
828
#if defined(TARGET_I386)
829
                current_flags = env->hflags;
830
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
831
                current_cs_base = (target_ulong)env->segs[R_CS].base;
832
                current_pc = current_cs_base + env->eip;
833
#else
834
#error unsupported CPU
835
#endif
836
            }
837
#endif /* TARGET_HAS_PRECISE_SMC */
838
            /* we need to do that to handle the case where a signal
839
               occurs while doing tb_phys_invalidate() */
840
            saved_tb = NULL;
841
            if (env) {
842
                saved_tb = env->current_tb;
843
                env->current_tb = NULL;
844
            }
845
            tb_phys_invalidate(tb, -1);
846
            if (env) {
847
                env->current_tb = saved_tb;
848
                if (env->interrupt_request && env->current_tb)
849
                    cpu_interrupt(env, env->interrupt_request);
850
            }
851
        }
852
        tb = tb_next;
853
    }
854
#if !defined(CONFIG_USER_ONLY)
855
    /* if no code remaining, no need to continue to use slow writes */
856
    if (!p->first_tb) {
857
        invalidate_page_bitmap(p);
858
        if (is_cpu_write_access) {
859
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
860
        }
861
    }
862
#endif
863
#ifdef TARGET_HAS_PRECISE_SMC
864
    if (current_tb_modified) {
865
        /* we generate a block containing just the instruction
866
           modifying the memory. It will ensure that it cannot modify
867
           itself */
868
        env->current_tb = NULL;
869
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
870
                    CF_SINGLE_INSN);
871
        cpu_resume_from_signal(env, NULL);
872
    }
873
#endif
874
}
875

    
876
/* len must be <= 8 and start must be a multiple of len */
877
static inline void tb_invalidate_phys_page_fast(target_phys_addr_t start, int len)
878
{
879
    PageDesc *p;
880
    int offset, b;
881
#if 0
882
    if (1) {
883
        if (loglevel) {
884
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
885
                   cpu_single_env->mem_write_vaddr, len,
886
                   cpu_single_env->eip,
887
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
888
        }
889
    }
890
#endif
891
    p = page_find(start >> TARGET_PAGE_BITS);
892
    if (!p)
893
        return;
894
    if (p->code_bitmap) {
895
        offset = start & ~TARGET_PAGE_MASK;
896
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
897
        if (b & ((1 << len) - 1))
898
            goto do_invalidate;
899
    } else {
900
    do_invalidate:
901
        tb_invalidate_phys_page_range(start, start + len, 1);
902
    }
903
}
904

    
905
#if !defined(CONFIG_SOFTMMU)
906
static void tb_invalidate_phys_page(target_phys_addr_t addr,
907
                                    unsigned long pc, void *puc)
908
{
909
    int n, current_flags, current_tb_modified;
910
    target_ulong current_pc, current_cs_base;
911
    PageDesc *p;
912
    TranslationBlock *tb, *current_tb;
913
#ifdef TARGET_HAS_PRECISE_SMC
914
    CPUState *env = cpu_single_env;
915
#endif
916

    
917
    addr &= TARGET_PAGE_MASK;
918
    p = page_find(addr >> TARGET_PAGE_BITS);
919
    if (!p)
920
        return;
921
    tb = p->first_tb;
922
    current_tb_modified = 0;
923
    current_tb = NULL;
924
    current_pc = 0; /* avoid warning */
925
    current_cs_base = 0; /* avoid warning */
926
    current_flags = 0; /* avoid warning */
927
#ifdef TARGET_HAS_PRECISE_SMC
928
    if (tb && pc != 0) {
929
        current_tb = tb_find_pc(pc);
930
    }
931
#endif
932
    while (tb != NULL) {
933
        n = (long)tb & 3;
934
        tb = (TranslationBlock *)((long)tb & ~3);
935
#ifdef TARGET_HAS_PRECISE_SMC
936
        if (current_tb == tb &&
937
            !(current_tb->cflags & CF_SINGLE_INSN)) {
938
                /* If we are modifying the current TB, we must stop
939
                   its execution. We could be more precise by checking
940
                   that the modification is after the current PC, but it
941
                   would require a specialized function to partially
942
                   restore the CPU state */
943

    
944
            current_tb_modified = 1;
945
            cpu_restore_state(current_tb, env, pc, puc);
946
#if defined(TARGET_I386)
947
            current_flags = env->hflags;
948
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
949
            current_cs_base = (target_ulong)env->segs[R_CS].base;
950
            current_pc = current_cs_base + env->eip;
951
#else
952
#error unsupported CPU
953
#endif
954
        }
955
#endif /* TARGET_HAS_PRECISE_SMC */
956
        tb_phys_invalidate(tb, addr);
957
        tb = tb->page_next[n];
958
    }
959
    p->first_tb = NULL;
960
#ifdef TARGET_HAS_PRECISE_SMC
961
    if (current_tb_modified) {
962
        /* we generate a block containing just the instruction
963
           modifying the memory. It will ensure that it cannot modify
964
           itself */
965
        env->current_tb = NULL;
966
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
967
                    CF_SINGLE_INSN);
968
        cpu_resume_from_signal(env, puc);
969
    }
970
#endif
971
}
972
#endif
973

    
974
/* add the tb in the target page and protect it if necessary */
975
static inline void tb_alloc_page(TranslationBlock *tb,
976
                                 unsigned int n, target_ulong page_addr)
977
{
978
    PageDesc *p;
979
    TranslationBlock *last_first_tb;
980

    
981
    tb->page_addr[n] = page_addr;
982
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
983
    tb->page_next[n] = p->first_tb;
984
    last_first_tb = p->first_tb;
985
    p->first_tb = (TranslationBlock *)((long)tb | n);
986
    invalidate_page_bitmap(p);
987

    
988
#if defined(TARGET_HAS_SMC) || 1
989

    
990
#if defined(CONFIG_USER_ONLY)
991
    if (p->flags & PAGE_WRITE) {
992
        target_ulong addr;
993
        PageDesc *p2;
994
        int prot;
995

    
996
        /* force the host page as non writable (writes will have a
997
           page fault + mprotect overhead) */
998
        page_addr &= qemu_host_page_mask;
999
        prot = 0;
1000
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1001
            addr += TARGET_PAGE_SIZE) {
1002

    
1003
            p2 = page_find (addr >> TARGET_PAGE_BITS);
1004
            if (!p2)
1005
                continue;
1006
            prot |= p2->flags;
1007
            p2->flags &= ~PAGE_WRITE;
1008
            page_get_flags(addr);
1009
          }
1010
        mprotect(g2h(page_addr), qemu_host_page_size,
1011
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
1012
#ifdef DEBUG_TB_INVALIDATE
1013
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
1014
               page_addr);
1015
#endif
1016
    }
1017
#else
1018
    /* if some code is already present, then the pages are already
1019
       protected. So we handle the case where only the first TB is
1020
       allocated in a physical page */
1021
    if (!last_first_tb) {
1022
        tlb_protect_code(page_addr);
1023
    }
1024
#endif
1025

    
1026
#endif /* TARGET_HAS_SMC */
1027
}
1028

    
1029
/* Allocate a new translation block. Flush the translation buffer if
1030
   too many translation blocks or too much generated code. */
1031
TranslationBlock *tb_alloc(target_ulong pc)
1032
{
1033
    TranslationBlock *tb;
1034

    
1035
    if (nb_tbs >= code_gen_max_blocks ||
1036
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
1037
        return NULL;
1038
    tb = &tbs[nb_tbs++];
1039
    tb->pc = pc;
1040
    tb->cflags = 0;
1041
    return tb;
1042
}
1043

    
1044
/* add a new TB and link it to the physical page tables. phys_page2 is
1045
   (-1) to indicate that only one page contains the TB. */
1046
void tb_link_phys(TranslationBlock *tb,
1047
                  target_ulong phys_pc, target_ulong phys_page2)
1048
{
1049
    unsigned int h;
1050
    TranslationBlock **ptb;
1051

    
1052
    /* add in the physical hash table */
1053
    h = tb_phys_hash_func(phys_pc);
1054
    ptb = &tb_phys_hash[h];
1055
    tb->phys_hash_next = *ptb;
1056
    *ptb = tb;
1057

    
1058
    /* add in the page list */
1059
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1060
    if (phys_page2 != -1)
1061
        tb_alloc_page(tb, 1, phys_page2);
1062
    else
1063
        tb->page_addr[1] = -1;
1064

    
1065
    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1066
    tb->jmp_next[0] = NULL;
1067
    tb->jmp_next[1] = NULL;
1068

    
1069
    /* init original jump addresses */
1070
    if (tb->tb_next_offset[0] != 0xffff)
1071
        tb_reset_jump(tb, 0);
1072
    if (tb->tb_next_offset[1] != 0xffff)
1073
        tb_reset_jump(tb, 1);
1074

    
1075
#ifdef DEBUG_TB_CHECK
1076
    tb_page_check();
1077
#endif
1078
}
1079

    
1080
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1081
   tb[1].tc_ptr. Return NULL if not found */
1082
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1083
{
1084
    int m_min, m_max, m;
1085
    unsigned long v;
1086
    TranslationBlock *tb;
1087

    
1088
    if (nb_tbs <= 0)
1089
        return NULL;
1090
    if (tc_ptr < (unsigned long)code_gen_buffer ||
1091
        tc_ptr >= (unsigned long)code_gen_ptr)
1092
        return NULL;
1093
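    /* tbs[] entries are created in order of increasing tc_ptr (code_gen_ptr
       only moves forward between flushes), so a binary search on tc_ptr works */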
    /* binary search (cf Knuth) */
1094
    m_min = 0;
1095
    m_max = nb_tbs - 1;
1096
    while (m_min <= m_max) {
1097
        m = (m_min + m_max) >> 1;
1098
        tb = &tbs[m];
1099
        v = (unsigned long)tb->tc_ptr;
1100
        if (v == tc_ptr)
1101
            return tb;
1102
        else if (tc_ptr < v) {
1103
            m_max = m - 1;
1104
        } else {
1105
            m_min = m + 1;
1106
        }
1107
    }
1108
    return &tbs[m_max];
1109
}
1110

    
1111
static void tb_reset_jump_recursive(TranslationBlock *tb);
1112

    
1113
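/* if jump 'n' of 'tb' is chained, find the destination TB (the head of the
   circular jmp_first list), unlink 'tb' from that list, make the generated
   jump fall through again and recursively unchain the destination TB */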
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1114
{
1115
    TranslationBlock *tb1, *tb_next, **ptb;
1116
    unsigned int n1;
1117

    
1118
    tb1 = tb->jmp_next[n];
1119
    if (tb1 != NULL) {
1120
        /* find head of list */
1121
        for(;;) {
1122
            n1 = (long)tb1 & 3;
1123
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1124
            if (n1 == 2)
1125
                break;
1126
            tb1 = tb1->jmp_next[n1];
1127
        }
1128
        /* we are now sure that tb jumps to tb1 */
1129
        tb_next = tb1;
1130

    
1131
        /* remove tb from the jmp_first list */
1132
        ptb = &tb_next->jmp_first;
1133
        for(;;) {
1134
            tb1 = *ptb;
1135
            n1 = (long)tb1 & 3;
1136
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
1137
            if (n1 == n && tb1 == tb)
1138
                break;
1139
            ptb = &tb1->jmp_next[n1];
1140
        }
1141
        *ptb = tb->jmp_next[n];
1142
        tb->jmp_next[n] = NULL;
1143

    
1144
        /* suppress the jump to next tb in generated code */
1145
        tb_reset_jump(tb, n);
1146

    
1147
        /* suppress jumps in the tb to which we could have jumped */
1148
        tb_reset_jump_recursive(tb_next);
1149
    }
1150
}
1151

    
1152
static void tb_reset_jump_recursive(TranslationBlock *tb)
1153
{
1154
    tb_reset_jump_recursive2(tb, 0);
1155
    tb_reset_jump_recursive2(tb, 1);
1156
}
1157

    
1158
#if defined(TARGET_HAS_ICE)
1159
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1160
{
1161
    target_phys_addr_t addr;
1162
    target_ulong pd;
1163
    ram_addr_t ram_addr;
1164
    PhysPageDesc *p;
1165

    
1166
    addr = cpu_get_phys_page_debug(env, pc);
1167
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
1168
    if (!p) {
1169
        pd = IO_MEM_UNASSIGNED;
1170
    } else {
1171
        pd = p->phys_offset;
1172
    }
1173
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
1174
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
1175
}
1176
#endif
1177

    
1178
/* Add a watchpoint.  */
1179
int  cpu_watchpoint_insert(CPUState *env, target_ulong addr)
1180
{
1181
    int i;
1182

    
1183
    for (i = 0; i < env->nb_watchpoints; i++) {
1184
        if (addr == env->watchpoint[i].vaddr)
1185
            return 0;
1186
    }
1187
    if (env->nb_watchpoints >= MAX_WATCHPOINTS)
1188
        return -1;
1189

    
1190
    i = env->nb_watchpoints++;
1191
    env->watchpoint[i].vaddr = addr;
1192
    tlb_flush_page(env, addr);
1193
    /* FIXME: This flush is needed because of the hack to make memory ops
1194
       terminate the TB.  It can be removed once the proper IO trap and
1195
       re-execute bits are in.  */
1196
    tb_flush(env);
1197
    return i;
1198
}
1199

    
1200
/* Remove a watchpoint.  */
1201
int cpu_watchpoint_remove(CPUState *env, target_ulong addr)
1202
{
1203
    int i;
1204

    
1205
    for (i = 0; i < env->nb_watchpoints; i++) {
1206
        if (addr == env->watchpoint[i].vaddr) {
1207
            env->nb_watchpoints--;
1208
            env->watchpoint[i] = env->watchpoint[env->nb_watchpoints];
1209
            tlb_flush_page(env, addr);
1210
            return 0;
1211
        }
1212
    }
1213
    return -1;
1214
}
1215

    
1216
/* Remove all watchpoints. */
1217
void cpu_watchpoint_remove_all(CPUState *env) {
1218
    int i;
1219

    
1220
    for (i = 0; i < env->nb_watchpoints; i++) {
1221
        tlb_flush_page(env, env->watchpoint[i].vaddr);
1222
    }
1223
    env->nb_watchpoints = 0;
1224
}
1225

    
1226
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
1227
   breakpoint is reached */
1228
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
1229
{
1230
#if defined(TARGET_HAS_ICE)
1231
    int i;
1232

    
1233
    for(i = 0; i < env->nb_breakpoints; i++) {
1234
        if (env->breakpoints[i] == pc)
1235
            return 0;
1236
    }
1237

    
1238
    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
1239
        return -1;
1240
    env->breakpoints[env->nb_breakpoints++] = pc;
1241

    
1242
    breakpoint_invalidate(env, pc);
1243
    return 0;
1244
#else
1245
    return -1;
1246
#endif
1247
}
1248

    
1249
/* remove all breakpoints */
1250
void cpu_breakpoint_remove_all(CPUState *env) {
1251
#if defined(TARGET_HAS_ICE)
1252
    int i;
1253
    for(i = 0; i < env->nb_breakpoints; i++) {
1254
        breakpoint_invalidate(env, env->breakpoints[i]);
1255
    }
1256
    env->nb_breakpoints = 0;
1257
#endif
1258
}
1259

    
1260
/* remove a breakpoint */
1261
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
1262
{
1263
#if defined(TARGET_HAS_ICE)
1264
    int i;
1265
    for(i = 0; i < env->nb_breakpoints; i++) {
1266
        if (env->breakpoints[i] == pc)
1267
            goto found;
1268
    }
1269
    return -1;
1270
 found:
1271
    env->nb_breakpoints--;
1272
    if (i < env->nb_breakpoints)
1273
      env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];
1274

    
1275
    breakpoint_invalidate(env, pc);
1276
    return 0;
1277
#else
1278
    return -1;
1279
#endif
1280
}
1281

    
1282
/* enable or disable single step mode. EXCP_DEBUG is returned by the
1283
   CPU loop after each instruction */
1284
void cpu_single_step(CPUState *env, int enabled)
1285
{
1286
#if defined(TARGET_HAS_ICE)
1287
    if (env->singlestep_enabled != enabled) {
1288
        env->singlestep_enabled = enabled;
1289
        /* must flush all the translated code to avoid inconsistencies */
1290
        /* XXX: only flush what is necessary */
1291
        tb_flush(env);
1292
    }
1293
#endif
1294
}
1295

    
1296
/* enable or disable low-level logging */
1297
void cpu_set_log(int log_flags)
1298
{
1299
    loglevel = log_flags;
1300
    if (loglevel && !logfile) {
1301
        logfile = fopen(logfilename, log_append ? "a" : "w");
1302
        if (!logfile) {
1303
            perror(logfilename);
1304
            _exit(1);
1305
        }
1306
#if !defined(CONFIG_SOFTMMU)
1307
        /* avoid glibc allocating the stream buffer with mmap() by setting one "by hand" */
1308
        {
1309
            static uint8_t logfile_buf[4096];
1310
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
1311
        }
1312
#else
1313
        setvbuf(logfile, NULL, _IOLBF, 0);
1314
#endif
1315
        log_append = 1;
1316
    }
1317
    if (!loglevel && logfile) {
1318
        fclose(logfile);
1319
        logfile = NULL;
1320
    }
1321
}
1322

    
1323
void cpu_set_log_filename(const char *filename)
1324
{
1325
    logfilename = strdup(filename);
1326
    if (logfile) {
1327
        fclose(logfile);
1328
        logfile = NULL;
1329
    }
1330
    cpu_set_log(loglevel);
1331
}
1332

    
1333
/* mask must never be zero, except for A20 change call */
1334
void cpu_interrupt(CPUState *env, int mask)
1335
{
1336
    TranslationBlock *tb;
1337
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1338

    
1339
    env->interrupt_request |= mask;
1340
    /* if the cpu is currently executing code, we must unlink it and
1341
       all the potentially executing TB */
1342
    tb = env->current_tb;
1343
    if (tb && !testandset(&interrupt_lock)) {
1344
        env->current_tb = NULL;
1345
        tb_reset_jump_recursive(tb);
1346
        resetlock(&interrupt_lock);
1347
    }
1348
}
1349

    
1350
void cpu_reset_interrupt(CPUState *env, int mask)
1351
{
1352
    env->interrupt_request &= ~mask;
1353
}
1354

    
1355
CPULogItem cpu_log_items[] = {
1356
    { CPU_LOG_TB_OUT_ASM, "out_asm",
1357
      "show generated host assembly code for each compiled TB" },
1358
    { CPU_LOG_TB_IN_ASM, "in_asm",
1359
      "show target assembly code for each compiled TB" },
1360
    { CPU_LOG_TB_OP, "op",
1361
      "show micro ops for each compiled TB" },
1362
    { CPU_LOG_TB_OP_OPT, "op_opt",
1363
      "show micro ops "
1364
#ifdef TARGET_I386
1365
      "before eflags optimization and "
1366
#endif
1367
      "after liveness analysis" },
1368
    { CPU_LOG_INT, "int",
1369
      "show interrupts/exceptions in short format" },
1370
    { CPU_LOG_EXEC, "exec",
1371
      "show trace before each executed TB (lots of logs)" },
1372
    { CPU_LOG_TB_CPU, "cpu",
1373
      "show CPU state before block translation" },
1374
#ifdef TARGET_I386
1375
    { CPU_LOG_PCALL, "pcall",
1376
      "show protected mode far calls/returns/exceptions" },
1377
#endif
1378
#ifdef DEBUG_IOPORT
1379
    { CPU_LOG_IOPORT, "ioport",
1380
      "show all i/o ports accesses" },
1381
#endif
1382
    { 0, NULL, NULL },
1383
};
1384

    
1385
static int cmp1(const char *s1, int n, const char *s2)
1386
{
1387
    if (strlen(s2) != n)
1388
        return 0;
1389
    return memcmp(s1, s2, n) == 0;
1390
}
1391

    
1392
/* takes a comma-separated list of log masks. Returns 0 on error. */
1393
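/* e.g. cpu_str_to_log_mask("in_asm,op") returns CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP;
   the special name "all" selects every entry of cpu_log_items */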
int cpu_str_to_log_mask(const char *str)
1394
{
1395
    CPULogItem *item;
1396
    int mask;
1397
    const char *p, *p1;
1398

    
1399
    p = str;
1400
    mask = 0;
1401
    for(;;) {
1402
        p1 = strchr(p, ',');
1403
        if (!p1)
1404
            p1 = p + strlen(p);
1405
        if(cmp1(p,p1-p,"all")) {
1406
                for(item = cpu_log_items; item->mask != 0; item++) {
1407
                        mask |= item->mask;
1408
                }
1409
        } else {
1410
        for(item = cpu_log_items; item->mask != 0; item++) {
1411
            if (cmp1(p, p1 - p, item->name))
1412
                goto found;
1413
        }
1414
        return 0;
1415
        }
1416
    found:
1417
        mask |= item->mask;
1418
        if (*p1 != ',')
1419
            break;
1420
        p = p1 + 1;
1421
    }
1422
    return mask;
1423
}
1424

    
1425
void cpu_abort(CPUState *env, const char *fmt, ...)
1426
{
1427
    va_list ap;
1428
    va_list ap2;
1429

    
1430
    va_start(ap, fmt);
1431
    va_copy(ap2, ap);
1432
    fprintf(stderr, "qemu: fatal: ");
1433
    vfprintf(stderr, fmt, ap);
1434
    fprintf(stderr, "\n");
1435
#ifdef TARGET_I386
1436
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1437
#else
1438
    cpu_dump_state(env, stderr, fprintf, 0);
1439
#endif
1440
    if (logfile) {
1441
        fprintf(logfile, "qemu: fatal: ");
1442
        vfprintf(logfile, fmt, ap2);
1443
        fprintf(logfile, "\n");
1444
#ifdef TARGET_I386
1445
        cpu_dump_state(env, logfile, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1446
#else
1447
        cpu_dump_state(env, logfile, fprintf, 0);
1448
#endif
1449
        fflush(logfile);
1450
        fclose(logfile);
1451
    }
1452
    va_end(ap2);
1453
    va_end(ap);
1454
    abort();
1455
}
1456

    
1457
CPUState *cpu_copy(CPUState *env)
1458
{
1459
    CPUState *new_env = cpu_init(env->cpu_model_str);
1460
    /* preserve chaining and index */
1461
    CPUState *next_cpu = new_env->next_cpu;
1462
    int cpu_index = new_env->cpu_index;
1463
    memcpy(new_env, env, sizeof(CPUState));
1464
    new_env->next_cpu = next_cpu;
1465
    new_env->cpu_index = cpu_index;
1466
    return new_env;
1467
}
1468

    
1469
#if !defined(CONFIG_USER_ONLY)
1470

    
1471
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
1472
{
1473
    unsigned int i;
1474

    
1475
    /* Discard jump cache entries for any tb which might potentially
1476
       overlap the flushed page.  */
1477
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1478
    memset (&env->tb_jmp_cache[i], 0, 
1479
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1480

    
1481
    i = tb_jmp_cache_hash_page(addr);
1482
    memset (&env->tb_jmp_cache[i], 0, 
1483
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
1484
}
1485

    
1486
/* NOTE: if flush_global is true, also flush global entries (not
1487
   implemented yet) */
1488
void tlb_flush(CPUState *env, int flush_global)
1489
{
1490
    int i;
1491

    
1492
#if defined(DEBUG_TLB)
1493
    printf("tlb_flush:\n");
1494
#endif
1495
    /* must reset current TB so that interrupts cannot modify the
1496
       links while we are modifying them */
1497
    env->current_tb = NULL;
1498

    
1499
    for(i = 0; i < CPU_TLB_SIZE; i++) {
1500
        env->tlb_table[0][i].addr_read = -1;
1501
        env->tlb_table[0][i].addr_write = -1;
1502
        env->tlb_table[0][i].addr_code = -1;
1503
        env->tlb_table[1][i].addr_read = -1;
1504
        env->tlb_table[1][i].addr_write = -1;
1505
        env->tlb_table[1][i].addr_code = -1;
1506
#if (NB_MMU_MODES >= 3)
1507
        env->tlb_table[2][i].addr_read = -1;
1508
        env->tlb_table[2][i].addr_write = -1;
1509
        env->tlb_table[2][i].addr_code = -1;
1510
#if (NB_MMU_MODES == 4)
1511
        env->tlb_table[3][i].addr_read = -1;
1512
        env->tlb_table[3][i].addr_write = -1;
1513
        env->tlb_table[3][i].addr_code = -1;
1514
#endif
1515
#endif
1516
    }
1517

    
1518
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
1519

    
1520
#if !defined(CONFIG_SOFTMMU)
1521
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
1522
#endif
1523
#ifdef USE_KQEMU
1524
    if (env->kqemu_enabled) {
1525
        kqemu_flush(env, flush_global);
1526
    }
1527
#endif
1528
    tlb_flush_count++;
1529
}
1530

    
1531
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
1532
{
1533
    if (addr == (tlb_entry->addr_read &
1534
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1535
        addr == (tlb_entry->addr_write &
1536
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
1537
        addr == (tlb_entry->addr_code &
1538
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
1539
        tlb_entry->addr_read = -1;
1540
        tlb_entry->addr_write = -1;
1541
        tlb_entry->addr_code = -1;
1542
    }
1543
}
1544

    
1545
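/* the softmmu TLB is a direct-mapped table per MMU mode, indexed by
   (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1), so flushing one page only
   requires clearing that single slot in each mode's table */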
void tlb_flush_page(CPUState *env, target_ulong addr)
1546
{
1547
    int i;
1548

    
1549
#if defined(DEBUG_TLB)
1550
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
1551
#endif
1552
    /* must reset current TB so that interrupts cannot modify the
1553
       links while we are modifying them */
1554
    env->current_tb = NULL;
1555

    
1556
    addr &= TARGET_PAGE_MASK;
1557
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
1558
    tlb_flush_entry(&env->tlb_table[0][i], addr);
1559
    tlb_flush_entry(&env->tlb_table[1][i], addr);
1560
#if (NB_MMU_MODES >= 3)
1561
    tlb_flush_entry(&env->tlb_table[2][i], addr);
1562
#if (NB_MMU_MODES == 4)
1563
    tlb_flush_entry(&env->tlb_table[3][i], addr);
1564
#endif
1565
#endif
1566

    
1567
    tlb_flush_jmp_cache(env, addr);
1568

    
1569
#if !defined(CONFIG_SOFTMMU)
1570
    if (addr < MMAP_AREA_END)
1571
        munmap((void *)addr, TARGET_PAGE_SIZE);
1572
#endif
1573
#ifdef USE_KQEMU
1574
    if (env->kqemu_enabled) {
1575
        kqemu_flush_page(env, addr);
1576
    }
1577
#endif
1578
}
1579

    
1580
/* update the TLBs so that writes to code in the virtual page 'addr'
1581
   can be detected */
1582
static void tlb_protect_code(ram_addr_t ram_addr)
1583
{
1584
    cpu_physical_memory_reset_dirty(ram_addr,
1585
                                    ram_addr + TARGET_PAGE_SIZE,
1586
                                    CODE_DIRTY_FLAG);
1587
}
1588

    
1589
/* update the TLB so that writes in physical page 'phys_addr' are no longer
1590
   tested for self modifying code */
1591
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
1592
                                    target_ulong vaddr)
1593
{
1594
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
1595
}
1596

    
1597
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
1598
                                         unsigned long start, unsigned long length)
1599
{
1600
    unsigned long addr;
1601
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
1602
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
1603
        if ((addr - start) < length) {
1604
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
1605
        }
1606
    }
1607
}
1608

    
1609
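/* phys_ram_dirty keeps one byte of dirty flags per target page; clear the
   given flags for [start, end) and downgrade matching TLB write entries to
   IO_MEM_NOTDIRTY so that the next store takes the slow path and can mark the
   page dirty again */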
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[1][i], start1, length);
#if (NB_MMU_MODES >= 3)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[2][i], start1, length);
#if (NB_MMU_MODES == 4)
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_table[3][i], start1, length);
#endif
#endif
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

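/* Illustrative sketch (not part of the original file): how a consumer of
   the dirty bitmap typically pairs cpu_physical_memory_get_dirty() with
   cpu_physical_memory_reset_dirty().  VGA_DIRTY_FLAG and the get_dirty
   helper are assumed to come from cpu-all.h; fb_base/fb_size are
   hypothetical framebuffer parameters (ram offsets). */
#if 0
static void example_refresh_dirty_fb(ram_addr_t fb_base, ram_addr_t fb_size)
{
    ram_addr_t addr;

    for (addr = fb_base; addr < fb_base + fb_size; addr += TARGET_PAGE_SIZE) {
        if (cpu_physical_memory_get_dirty(addr, VGA_DIRTY_FLAG)) {
            /* redraw the scanlines backed by this page ... */
        }
    }
    /* clear only the VGA flag; CODE_DIRTY_FLAG bookkeeping is untouched */
    cpu_physical_memory_reset_dirty(fb_base, fb_base + fb_size,
                                    VGA_DIRTY_FLAG);
}
#endif
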
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[1][i]);
#if (NB_MMU_MODES >= 3)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[2][i]);
#if (NB_MMU_MODES == 4)
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_table[3][i]);
#endif
#endif
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_table[0][i], addr);
    tlb_set_dirty1(&env->tlb_table[1][i], addr);
#if (NB_MMU_MODES >= 3)
    tlb_set_dirty1(&env->tlb_table[2][i], addr);
#if (NB_MMU_MODES == 4)
    tlb_set_dirty1(&env->tlb_table[3][i], addr);
#endif
#endif
}

/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;
    CPUTLBEntry *te;
    int i;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x idx=%d smmu=%d pd=0x%08lx\n",
           vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        /* Make accesses to pages with watchpoints go via the
           watchpoint trap routines.  */
        for (i = 0; i < env->nb_watchpoints; i++) {
            if (vaddr == (env->watchpoint[i].vaddr & TARGET_PAGE_MASK)) {
                if (address & ~TARGET_PAGE_MASK) {
                    env->watchpoint[i].addend = 0;
                    address = vaddr | io_mem_watch;
                } else {
                    env->watchpoint[i].addend = pd - paddr +
                        (unsigned long) phys_ram_base;
                    /* TODO: Figure out how to make read watchpoints coexist
                       with code.  */
                    pd = (pd & TARGET_PAGE_MASK) | io_mem_watch | IO_MEM_ROMD;
                }
            }
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        te = &env->tlb_table[mmu_idx][index];
        te->addend = addend;
        if (prot & PAGE_READ) {
            te->addr_read = address;
        } else {
            te->addr_read = -1;
        }

        if (prot & PAGE_EXEC) {
            te->addr_code = address;
        } else {
            te->addr_code = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
                (pd & IO_MEM_ROMD)) {
                /* write access calls the I/O callback */
                te->addr_write = vaddr |
                    (pd & ~(TARGET_PAGE_MASK | IO_MEM_ROMD));
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                te->addr_write = vaddr | IO_MEM_NOTDIRTY;
            } else {
                te->addr_write = address;
            }
        } else {
            te->addr_write = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we act as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

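/* Illustrative sketch (not part of the original file, and simplified):
   how the softmmu fast path generated from softmmu_template.h is
   expected to consume the entry that tlb_set_page_exec() just filled in.
   For a plain writable RAM page, addr_write holds the page-aligned
   virtual address, so the comparison succeeds and the store goes
   straight through 'addend'; for ROM, notdirty or I/O pages the low
   bits are non-zero, the comparison fails, and the slow path
   (tlb_fill()/io_mem_write callbacks) is taken instead. */
#if 0
static inline void example_store_fast_path(CPUState *env1, target_ulong addr,
                                           uint32_t val, int mmu_idx)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    CPUTLBEntry *e = &env1->tlb_table[mmu_idx][index];

    if (e->addr_write == (addr & TARGET_PAGE_MASK)) {
        /* hit: direct host store through the precomputed addend */
        stl_p((uint8_t *)(unsigned long)(addr + e->addend), val);
    } else {
        /* miss or special page: refill / I/O callback path */
    }
}
#endif
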
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, validate_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
                      target_phys_addr_t paddr, int prot,
                      int mmu_idx, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    PageDesc *p;
    target_ulong addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    end = TARGET_PAGE_ALIGN(start + len); /* must do before we lose bits in the next step */
    start = start & TARGET_PAGE_MASK;

    if (end < start)
        /* we've wrapped around */
        return -1;
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

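/* Illustrative sketch (not part of the original file): how user-mode
   emulation code (e.g. the linux-user mmap/mprotect paths) is expected
   to drive this page-flag API.  'guest_base_addr', 'guest_len' and the
   function name are hypothetical. */
#if 0
static int example_track_guest_mapping(target_ulong guest_base_addr,
                                       target_ulong guest_len, int prot)
{
    int flags = PAGE_VALID;

    if (prot & PROT_READ)
        flags |= PAGE_READ;
    if (prot & PROT_WRITE)
        flags |= PAGE_WRITE;   /* PAGE_WRITE_ORG is added automatically */
    if (prot & PROT_EXEC)
        flags |= PAGE_EXEC;
    page_set_flags(guest_base_addr, guest_base_addr + guest_len, flags);

    /* later, before touching the range on the guest's behalf: */
    return page_check_range(guest_base_addr, guest_len,
                            PAGE_READ | PAGE_WRITE);
}
#endif
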
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    target_ulong host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)g2h(host_start), qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    void *subpage;

#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        kqemu_set_phys_mem(start_addr, size, phys_offset);
    }
#endif
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset);
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD))
                phys_offset += TARGET_PAGE_SIZE;
            else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset);
                }
            }
        }
    }

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

/* XXX: better than nothing */
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
    ram_addr_t addr;
    if ((phys_ram_alloc_offset + size) > phys_ram_size) {
        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
                (uint64_t)size, (uint64_t)phys_ram_size);
        abort();
    }
    addr = phys_ram_alloc_offset;
    phys_ram_alloc_offset = TARGET_PAGE_ALIGN(phys_ram_alloc_offset + size);
    return addr;
}

void qemu_ram_free(ram_addr_t addr)
{
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 0, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 0, 0, 0);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#ifdef TARGET_SPARC
    do_unassigned_access(addr, 1, 0, 0);
#elif TARGET_CRIS
    do_unassigned_access(addr, 1, 0, 0);
#endif
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stb_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stw_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
    unsigned long ram_addr;
    int dirty_flags;
    ram_addr = addr - (unsigned long)phys_ram_base;
    dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
#endif
    }
    stl_p((uint8_t *)(long)addr, val);
#ifdef USE_KQEMU
    if (cpu_single_env->kqemu_enabled &&
        (dirty_flags & KQEMU_MODIFY_PAGE_MASK) != KQEMU_MODIFY_PAGE_MASK)
        kqemu_modify_page(cpu_single_env, ram_addr);
#endif
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
}

static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

#if defined(CONFIG_SOFTMMU)
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    return ldl_phys(addr);
}

/* Generate a debug exception if a watchpoint has been hit.
   Returns the real physical address of the access.  addr will be a host
   address in case of a RAM location.  */
static target_ulong check_watchpoint(target_phys_addr_t addr)
{
    CPUState *env = cpu_single_env;
    target_ulong watch;
    target_ulong retaddr;
    int i;

    retaddr = addr;
    for (i = 0; i < env->nb_watchpoints; i++) {
        watch = env->watchpoint[i].vaddr;
        if (((env->mem_write_vaddr ^ watch) & TARGET_PAGE_MASK) == 0) {
            retaddr = addr - env->watchpoint[i].addend;
            if (((addr ^ watch) & ~TARGET_PAGE_MASK) == 0) {
                cpu_single_env->watchpoint_hit = i + 1;
                cpu_interrupt(cpu_single_env, CPU_INTERRUPT_DEBUG);
                break;
            }
        }
    }
    return retaddr;
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    addr = check_watchpoint(addr);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc *watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc *watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
#endif

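/* Illustrative sketch (not part of the original file): the watchpoint
   machinery above is normally driven by the cpu_watchpoint_insert()/
   cpu_watchpoint_remove() helpers (assumed to live elsewhere in this
   file), typically from the gdb stub.  Once a watchpoint is registered
   and the page's TLB entry is refilled, tlb_set_page_exec() redirects
   the page to io_mem_watch, so stores land in watch_mem_write*. */
#if 0
static void example_watch_guest_address(CPUState *env1, target_ulong vaddr)
{
    if (cpu_watchpoint_insert(env1, vaddr) == 0) {
        /* any store to 'vaddr' now goes through check_watchpoint(),
           which may raise CPU_INTERRUPT_DEBUG */
    }
}
#endif
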
static inline uint32_t subpage_readlen (subpage_t *mmio, target_phys_addr_t addr,
                                 unsigned int len)
{
    uint32_t ret;
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif
    ret = (**mmio->mem_read[idx][len])(mmio->opaque[idx][0][len], addr);

    return ret;
}

static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                              uint32_t value, unsigned int len)
{
    unsigned int idx;

    idx = SUBPAGE_IDX(addr - mmio->base);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n", __func__,
           mmio, len, addr, idx, value);
#endif
    (**mmio->mem_write[idx][len])(mmio->opaque[idx][1][len], addr, value);
}

static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx "\n", __func__, addr);
#endif

    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel (void *opaque,
                         target_phys_addr_t addr, uint32_t value)
{
#if defined(DEBUG_SUBPAGE)
    printf("%s: addr " TARGET_FMT_plx " val %08x\n", __func__, addr, value);
#endif
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc *subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc *subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory)
{
    int idx, eidx;
    unsigned int i;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %d\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    memory >>= IO_MEM_SHIFT;
    for (; idx <= eidx; idx++) {
        for (i = 0; i < 4; i++) {
            if (io_mem_read[memory][i]) {
                mmio->mem_read[idx][i] = &io_mem_read[memory][i];
                mmio->opaque[idx][0][i] = io_mem_opaque[memory];
            }
            if (io_mem_write[memory][i]) {
                mmio->mem_write[idx][i] = &io_mem_write[memory][i];
                mmio->opaque[idx][1][i] = io_mem_opaque[memory];
            }
        }
    }

    return 0;
}

static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                           ram_addr_t orig_memory)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));
    if (mmio != NULL) {
        mmio->base = base;
        subpage_memory = cpu_register_io_memory(0, subpage_read, subpage_write, mmio);
#if defined(DEBUG_SUBPAGE)
        printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
               mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
        *phys = subpage_memory | IO_MEM_SUBPAGE;
        subpage_register(mmio, 0, TARGET_PAGE_SIZE - 1, orig_memory);
    }

    return mmio;
}

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

#if defined(CONFIG_SOFTMMU)
    io_mem_watch = cpu_register_io_memory(-1, watch_mem_read,
                                          watch_mem_write, NULL);
#endif
    /* alloc dirty bits array */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer. The
   registered functions may be modified dynamically later.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i, subwidth = 0;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        if (!mem_read[i] || !mem_write[i])
            subwidth = IO_MEM_SUBWIDTH;
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return (io_index << IO_MEM_SHIFT) | subwidth;
}

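/* Illustrative sketch (not part of the original file): the usual device
   pattern for cpu_register_io_memory().  The example_* callbacks and
   state structure are hypothetical; the three entries of each array
   handle byte, word and dword accesses as described above. */
#if 0
typedef struct { uint32_t status; } ExampleDevState;

static uint32_t example_dev_readl(void *opaque, target_phys_addr_t addr)
{
    ExampleDevState *s = opaque;
    return s->status;
}

static void example_dev_writel(void *opaque, target_phys_addr_t addr,
                               uint32_t val)
{
    ExampleDevState *s = opaque;
    s->status = val;
}

static CPUReadMemoryFunc *example_dev_read[3] = {
    NULL, NULL, example_dev_readl,   /* dword only: IO_MEM_SUBWIDTH is set */
};

static CPUWriteMemoryFunc *example_dev_write[3] = {
    NULL, NULL, example_dev_writel,
};

static void example_dev_init(ExampleDevState *s, target_phys_addr_t base)
{
    int io;

    /* io_index 0 asks for a new io zone; map one page of MMIO to it */
    io = cpu_register_io_memory(0, example_dev_read, example_dev_write, s);
    cpu_register_physical_memory(base, 0x1000, io);
}
#endif
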
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                /* FIXME - should this return an error rather than just fail? */
                return;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

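/* Illustrative sketch (not part of the original file): device emulation
   normally moves DMA data with cpu_physical_memory_read()/write() (thin
   wrappers around cpu_physical_memory_rw() from cpu-all.h), so that RAM,
   ROM and MMIO destinations are all handled and the dirty/SMC
   bookkeeping above is applied.  The function and argument names are
   hypothetical. */
#if 0
static void example_dma_to_guest(target_phys_addr_t dma_dst,
                                 const uint8_t *data, int size)
{
    cpu_physical_memory_write(dma_dst, data, size);
}

static void example_dma_from_guest(target_phys_addr_t dma_src,
                                   uint8_t *data, int size)
{
    cpu_physical_memory_read(dma_src, data, size);
}
#endif
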
/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = phys_ram_base + addr1;
            memcpy(ptr, buf, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* warning: addr must be aligned */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldq_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}

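/* Illustrative sketch (not part of the original file): the intended use
   of the *_notdirty stores, as hinted at in the comment above -- an MMU
   helper that sets accessed/dirty bits in a guest page table entry
   without marking the page dirty or invalidating translated code.
   'pte_addr', 'set_bits' and the function name are hypothetical. */
#if 0
static void example_update_pte_flags(target_phys_addr_t pte_addr,
                                     uint32_t set_bits)
{
    uint32_t pte = ldl_phys(pte_addr);

    if ((pte & set_bits) != set_bits) {
        stl_phys_notdirty(pte_addr, pte | set_bits);
    }
}
#endif
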
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

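/* Illustrative sketch (not part of the original file): how a debugger
   front end such as the gdb stub is expected to use cpu_memory_rw_debug()
   to read guest virtual memory through the current page tables.  The
   function and argument names are hypothetical. */
#if 0
static int example_read_guest_bytes(CPUState *env1, target_ulong vaddr,
                                    uint8_t *out, int len)
{
    if (cpu_memory_rw_debug(env1, vaddr, out, len, 0) < 0)
        return -1;          /* some page in the range is unmapped */
    return 0;
}
#endif
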
void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %ld/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
            cross_page,
            nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

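/* Illustrative sketch (not part of the original file): dump_exec_info()
   takes a (FILE *, fprintf-like) pair, e.g. from the monitor's
   "info jit" command or for ad hoc debugging; plain fprintf matches the
   expected callback type. */
#if 0
static void example_dump_jit_stats(void)
{
    dump_exec_info(stdout, fprintf);
}
#endif
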
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif