root / exec-all.h @ 977d5710

/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

/* allow seeing translation results - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

#ifndef glue
#define xglue(x, y) x ## y
#define glue(x, y) xglue(x, y)
#define stringify(s)        tostring(s)
#define tostring(s)        #s
#endif
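
/* Illustrative only (not part of the original header): what these helper
   macros expand to.  WIDTH and the pasted identifiers below are hypothetical
   examples, not names used elsewhere in QEMU. */
#if 0
#define WIDTH 32
xglue(helper_, WIDTH)     /* -> helper_WIDTH (argument pasted without expansion) */
glue(helper_, WIDTH)      /* -> helper_32    (argument expanded first)           */
tostring(WIDTH)           /* -> "WIDTH"                                          */
stringify(WIDTH)          /* -> "32"                                             */
#endif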

#if GCC_MAJOR < 3
#define __builtin_expect(x, n) (x)
#endif

#ifdef __i386__
#define REGPARM(n) __attribute((regparm(n)))
#else
#define REGPARM(n)
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */

struct TranslationBlock;

/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 32
#define OPC_BUF_SIZE 512
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
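
/* A minimal sketch (not in the original header) of how a target translator
   typically drives these values: instructions are decoded until is_jmp
   leaves DISAS_NEXT or the op buffer headroom implied by OPC_MAX_SIZE is
   exhausted.  DisasContext, disas_insn() and the nb_ops accounting below
   are hypothetical placeholders. */
#if 0
typedef struct DisasContext { int is_jmp; } DisasContext;

static void translate_loop_sketch(DisasContext *dc)
{
    int nb_ops = 0;

    dc->is_jmp = DISAS_NEXT;
    do {
        disas_insn(dc);              /* hypothetical: decode one guest insn */
        nb_ops += MAX_OP_PER_INSTR;  /* worst-case micro-ops per insn */
    } while (dc->is_jmp == DISAS_NEXT && nb_ops < OPC_MAX_SIZE);
}
#endif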

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * 3)

extern uint16_t gen_opc_buf[OPC_BUF_SIZE];
extern uint32_t gen_opparam_buf[OPPARAM_BUF_SIZE];
extern long gen_labels[OPC_BUF_SIZE];
extern int nb_gen_labels;
extern target_ulong gen_opc_pc[OPC_BUF_SIZE];
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];

typedef void (GenOpFunc)(void);
typedef void (GenOpFunc1)(long);
typedef void (GenOpFunc2)(long, long);
typedef void (GenOpFunc3)(long, long, long);

#if defined(TARGET_I386)

void optimize_flags_init(void);

#endif

extern FILE *logfile;
extern int loglevel;

int gen_intermediate_code(CPUState *env, struct TranslationBlock *tb);
int gen_intermediate_code_pc(CPUState *env, struct TranslationBlock *tb);
void dump_ops(const uint16_t *opc_buf, const uint32_t *opparam_buf);
int cpu_gen_code(CPUState *env, struct TranslationBlock *tb,
                 int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state(struct TranslationBlock *tb,
                      CPUState *env, unsigned long searched_pc,
                      void *puc);
int cpu_gen_code_copy(CPUState *env, struct TranslationBlock *tb,
                      int max_code_size, int *gen_code_size_ptr);
int cpu_restore_state_copy(struct TranslationBlock *tb,
                           CPUState *env, unsigned long searched_pc,
                           void *puc);
void cpu_resume_from_signal(CPUState *env1, void *puc);
void cpu_exec_init(void);
int page_unprotect(unsigned long address, unsigned long pc, void *puc);
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access);
void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu);

#define CODE_GEN_MAX_SIZE        65536
#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

#define CODE_GEN_HASH_BITS     15
#define CODE_GEN_HASH_SIZE     (1 << CODE_GEN_HASH_BITS)

#define CODE_GEN_PHYS_HASH_BITS     15
#define CODE_GEN_PHYS_HASH_SIZE     (1 << CODE_GEN_PHYS_HASH_BITS)

/* maximum total translated code allocated */

/* NOTE: the translated code area cannot be too big because on some
   archs the range of "fast" function calls is limited. Here is a
   summary of the ranges:

   i386  : signed 32 bits
   arm   : signed 26 bits
   ppc   : signed 24 bits
   sparc : signed 32 bits
   alpha : signed 23 bits
*/

#if defined(__alpha__)
#define CODE_GEN_BUFFER_SIZE     (2 * 1024 * 1024)
#elif defined(__powerpc__)
#define CODE_GEN_BUFFER_SIZE     (6 * 1024 * 1024)
#else
#define CODE_GEN_BUFFER_SIZE     (8 * 1024 * 1024)
#endif

//#define CODE_GEN_BUFFER_SIZE     (128 * 1024)

/* estimated block size for TB allocation */
/* XXX: use a per-target average code fragment size and modulate it
   according to the host CPU */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 128
#else
#define CODE_GEN_AVG_BLOCK_SIZE 64
#endif

#define CODE_GEN_MAX_BLOCKS    (CODE_GEN_BUFFER_SIZE / CODE_GEN_AVG_BLOCK_SIZE)

#if defined(__powerpc__)
#define USE_DIRECT_JUMP
#endif
#if defined(__i386__) && !defined(_WIN32)
#define USE_DIRECT_JUMP
#endif

typedef struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    unsigned int flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_CODE_COPY   0x0001 /* block was generated in code copy mode */
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */

    uint8_t *tc_ptr;    /* pointer to the translated code */
    struct TranslationBlock *hash_next; /* next matching tb for virtual address */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    target_ulong page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[4]; /* offset of jump instruction */
#else
    uint32_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
} TranslationBlock;

    
191
static inline unsigned int tb_hash_func(target_ulong pc)
192
{
193
    return pc & (CODE_GEN_HASH_SIZE - 1);
194
}
195

    
196
static inline unsigned int tb_phys_hash_func(unsigned long pc)
197
{
198
    return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
199
}
200

    
201
TranslationBlock *tb_alloc(target_ulong pc);
202
void tb_flush(CPUState *env);
203
void tb_link(TranslationBlock *tb);
204
void tb_link_phys(TranslationBlock *tb, 
205
                  target_ulong phys_pc, target_ulong phys_page2);
206

    
207
extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];
208
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
209

    
210
extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
211
extern uint8_t *code_gen_ptr;
212

    
213
/* find a translation block in the translation cache. If not found,
214
   return NULL and the pointer to the last element of the list in pptb */
215
static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
216
                                        target_ulong pc, 
217
                                        target_ulong cs_base,
218
                                        unsigned int flags)
219
{
220
    TranslationBlock **ptb, *tb;
221
    unsigned int h;
222
 
223
    h = tb_hash_func(pc);
224
    ptb = &tb_hash[h];
225
    for(;;) {
226
        tb = *ptb;
227
        if (!tb)
228
            break;
229
        if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
230
            return tb;
231
        ptb = &tb->hash_next;
232
    }
233
    *pptb = ptb;
234
    return NULL;
235
}
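
/* Illustrative usage (not in the original header): on a miss the caller can
   translate a new block and append it at the list tail returned through
   pptb.  tb_translate_sketch() is a hypothetical stand-in for the real path
   (tb_alloc + cpu_gen_code + linking). */
#if 0
static TranslationBlock *tb_lookup_sketch(CPUState *env, target_ulong pc,
                                          target_ulong cs_base,
                                          unsigned int flags)
{
    TranslationBlock **ptb, *tb;

    tb = tb_find(&ptb, pc, cs_base, flags);
    if (!tb) {
        tb = tb_translate_sketch(env, pc, cs_base, flags); /* hypothetical */
        tb->hash_next = NULL;
        *ptb = tb;              /* append at the tail located by tb_find */
    }
    return tb;
}
#endif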

#if defined(USE_DIRECT_JUMP)

#if defined(__powerpc__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    uint32_t val, *ptr;

    /* patch the branch destination */
    ptr = (uint32_t *)jmp_addr;
    val = *ptr;
    val = (val & ~0x03fffffc) | ((addr - jmp_addr) & 0x03fffffc);
    *ptr = val;
    /* flush icache */
    asm volatile ("dcbst 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("icbi 0,%0" : : "r"(ptr) : "memory");
    asm volatile ("sync" : : : "memory");
    asm volatile ("isync" : : : "memory");
}
#elif defined(__i386__)
static inline void tb_set_jmp_target1(unsigned long jmp_addr, unsigned long addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    unsigned long offset;

    offset = tb->tb_jmp_offset[n];
    tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
    offset = tb->tb_jmp_offset[n + 2];
    if (offset != 0xffff)
        tb_set_jmp_target1((unsigned long)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, unsigned long addr)
{
    tb->tb_next[n] = addr;
}

#endif

static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (unsigned long)tb_next->tc_ptr);

        /* add in TB jmp circular list */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((long)(tb) | (n));
    }
}
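
/* Illustrative sketch (not in the original header): how chaining is
   typically driven from the execution loop.  A TB exiting through the
   JUMP_TB macro defined later in this file leaves (previous TB pointer |
   jump slot) in T0; the next TB can then be patched in with tb_add_jump().
   't0' is a hypothetical local standing in for that value. */
#if 0
static void chain_tb_sketch(long t0, TranslationBlock *next_tb)
{
    if (t0 != 0) {
        tb_add_jump((TranslationBlock *)(t0 & ~3), t0 & 3, next_tb);
    }
}
#endif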

TranslationBlock *tb_find_pc(unsigned long pc_ptr);

#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif

#if defined(_WIN32)
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".section .text\n"
#elif defined(__APPLE__)
#define ASM_DATA_SECTION ".data\n"
#define ASM_PREVIOUS_SECTION ".text\n"
#define ASM_NAME(x) "_" #x
#else
#define ASM_DATA_SECTION ".section \".data\"\n"
#define ASM_PREVIOUS_SECTION ".previous\n"
#define ASM_NAME(x) stringify(x)
#endif

#if defined(__powerpc__)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (ASM_DATA_SECTION\
                  ASM_NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "b " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#elif defined(__i386__) && defined(USE_DIRECT_JUMP)

/* we patch the jump instruction directly */
#define GOTO_TB(opname, tbparam, n)\
do {\
    asm volatile (".section .data\n"\
                  ASM_NAME(__op_label) #n "." ASM_NAME(opname) ":\n"\
                  ".long 1f\n"\
                  ASM_PREVIOUS_SECTION \
                  "jmp " ASM_NAME(__op_jmp) #n "\n"\
                  "1:\n");\
} while (0)

#else

/* jump to next block operations (more portable code, does not need
   cache flushing, but slower because of indirect jump) */
#define GOTO_TB(opname, tbparam, n)\
do {\
    static void __attribute__((unused)) *dummy ## n = &&dummy_label ## n;\
    static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
    goto *(void *)(((TranslationBlock *)tbparam)->tb_next[n]);\
label ## n: ;\
dummy_label ## n: ;\
} while (0)

#endif

/* XXX: will be suppressed */
#define JUMP_TB(opname, tbparam, n, eip)\
do {\
    GOTO_TB(opname, tbparam, n);\
    T0 = (long)(tbparam) + (n);\
    EIP = (int32_t)eip;\
    EXIT_TB();\
} while (0)
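
/* Illustrative only (not in the original header): the rough shape of a
   generated micro-op that uses JUMP_TB.  The op name is made up, and
   OPPROTO, PARAM1, PARAM2, T0, EIP and EXIT_TB come from the dyngen op
   environment, not from this header. */
#if 0
void OPPROTO op_jmp_tb_sketch(void)
{
    /* PARAM1: TranslationBlock pointer; PARAM2: target eip */
    JUMP_TB(op_jmp_tb_sketch, PARAM1, 0, PARAM2);
}
#endif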

extern CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
extern CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
extern void *io_mem_opaque[IO_MEM_NB_ENTRIES];

#ifdef __powerpc__
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
                          "0:    lwarx %0,0,%1\n"
                          "      xor. %0,%3,%0\n"
                          "      bne 1f\n"
                          "      stwcx. %2,0,%1\n"
                          "      bne- 0b\n"
                          "1:    "
                          : "=&r" (ret)
                          : "r" (p), "r" (1), "r" (0)
                          : "cr0", "memory");
    return ret;
}
#endif

#ifdef __i386__
static inline int testandset (int *p)
{
    char ret;
    long int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif

#ifdef __x86_64__
static inline int testandset (int *p)
{
    char ret;
    int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; sete %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif

#ifdef __s390__
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs    %0,%1,0(%2)\n"
                          "   jl    0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif

#ifdef __alpha__
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0:        mov 1,%2\n"
                          "        ldl_l %0,%1\n"
                          "        stl_c %2,%1\n"
                          "        beq %2,1f\n"
                          ".subsection 2\n"
                          "1:        br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif

#ifdef __sparc__
static inline int testandset (int *p)
{
        int ret;

        __asm__ __volatile__("ldstub        [%1], %0"
                             : "=r" (ret)
                             : "r" (p)
                             : "memory");

        return (ret ? 1 : 0);
}
#endif

#ifdef __arm__
static inline int testandset (int *spinlock)
{
    register unsigned int ret;
    __asm__ __volatile__("swp %0, %1, [%2]"
                         : "=r"(ret)
                         : "0"(1), "r"(spinlock));

    return ret;
}
#endif

#ifdef __mc68000
static inline int testandset (int *p)
{
    char ret;
    __asm__ __volatile__("tas %1; sne %0"
                         : "=r" (ret)
                         : "m" (p)
                         : "cc","memory");
    return ret == 0;
}
#endif

typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0

#if defined(CONFIG_USER_ONLY)
static inline void spin_lock(spinlock_t *lock)
{
    while (testandset(lock));
}

static inline void spin_unlock(spinlock_t *lock)
{
    *lock = 0;
}

static inline int spin_trylock(spinlock_t *lock)
{
    return !testandset(lock);
}
#else
static inline void spin_lock(spinlock_t *lock)
{
}

static inline void spin_unlock(spinlock_t *lock)
{
}

static inline int spin_trylock(spinlock_t *lock)
{
    return 1;
}
#endif

extern spinlock_t tb_lock;
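
/* Illustrative sketch (not in the original header): tb_lock protects the
   translation caches in the user-mode build; the system-mode build compiles
   the lock operations away (see the empty stubs above). */
#if 0
static void update_tb_caches_sketch(void)
{
    spin_lock(&tb_lock);
    /* ... add or invalidate TranslationBlocks, patch chained jumps ... */
    spin_unlock(&tb_lock);
}
#endif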

extern int tb_invalidated_flag;

#if !defined(CONFIG_USER_ONLY)

void tlb_fill(target_ulong addr, int is_write, int is_user,
              void *retaddr);

#define ACCESS_TYPE 3
#define MEMSUFFIX _code
#define env cpu_single_env

#define DATA_SIZE 1
#include "softmmu_header.h"

#define DATA_SIZE 2
#include "softmmu_header.h"

#define DATA_SIZE 4
#include "softmmu_header.h"

#define DATA_SIZE 8
#include "softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
#undef env

#endif

#if defined(CONFIG_USER_ONLY)
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    return addr;
}
#else
/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
   is the offset relative to phys_ram_base */
/* XXX: i386 target specific */
static inline target_ulong get_phys_addr_code(CPUState *env, target_ulong addr)
{
    int is_user, index, pd;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
#if defined(TARGET_I386)
    is_user = ((env->hflags & HF_CPL_MASK) == 3);
#elif defined (TARGET_PPC)
    is_user = msr_pr;
#elif defined (TARGET_SPARC)
    is_user = (env->psrs == 0);
#else
#error "Unimplemented !"
#endif
    if (__builtin_expect(env->tlb_read[is_user][index].address !=
                         (addr & TARGET_PAGE_MASK), 0)) {
        ldub_code(addr);
    }
    pd = env->tlb_read[is_user][index].address & ~TARGET_PAGE_MASK;
    if (pd > IO_MEM_ROM) {
        cpu_abort(env, "Trying to execute code outside RAM or ROM at 0x%08lx\n", addr);
    }
    return addr + env->tlb_read[is_user][index].addend - (unsigned long)phys_ram_base;
}
#endif
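
/* Illustrative sketch (not in the original header): the offset returned by
   get_phys_addr_code() is what feeds the physical TB hash chain declared
   above.  The match condition here is simplified; the real lookup also
   compares pc, cs_base and flags. */
#if 0
static TranslationBlock *tb_phys_lookup_sketch(CPUState *env, target_ulong pc)
{
    target_ulong phys_pc;
    TranslationBlock *tb;

    phys_pc = get_phys_addr_code(env, pc);
    for (tb = tb_phys_hash[tb_phys_hash_func(phys_pc)]; tb != NULL;
         tb = tb->phys_hash_next) {
        if (tb->page_addr[0] == (phys_pc & TARGET_PAGE_MASK))
            return tb;
    }
    return NULL;
}
#endif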