Revision 2e70f6ef

b/cpu-all.h
782 782
    __attribute__ ((__noreturn__));
783 783
extern CPUState *first_cpu;
784 784
extern CPUState *cpu_single_env;
785
extern int64_t qemu_icount;
786
extern int use_icount;
785 787

  
786 788
#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
787 789
#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
b/cpu-defs.h
130 130
                   sizeof(target_phys_addr_t))];
131 131
} CPUTLBEntry;
132 132

  
133
#ifdef WORDS_BIGENDIAN
134
typedef struct icount_decr_u16 {
135
    uint16_t high;
136
    uint16_t low;
137
} icount_decr_u16;
138
#else
139
typedef struct icount_decr_u16 {
140
    uint16_t low;
141
    uint16_t high;
142
} icount_decr_u16;
143
#endif
144

  
133 145
#define CPU_TEMP_BUF_NLONGS 128
134 146
#define CPU_COMMON                                                      \
135 147
    struct TranslationBlock *current_tb; /* currently executing TB  */  \
136 148
    /* soft mmu support */                                              \
137
    /* in order to avoid passing too many arguments to the memory       \
138
       write helpers, we store some rarely used information in the CPU  \
149
    /* in order to avoid passing too many arguments to the MMIO         \
150
       helpers, we store some rarely used information in the CPU        \
139 151
       context. */                                                     \
140
    unsigned long mem_write_pc; /* host pc at which the memory was      \
141
                                   written */                           \
142
    target_ulong mem_write_vaddr; /* target virtual addr at which the   \
143
                                     memory was written */              \
152
    unsigned long mem_io_pc; /* host pc at which the memory was         \
153
                                accessed */                             \
154
    target_ulong mem_io_vaddr; /* target virtual addr at which the      \
155
                                     memory was accessed */             \
144 156
    int halted; /* TRUE if the CPU is in suspend state */               \
145 157
    /* The meaning of the MMU modes is defined in the target code. */   \
146 158
    CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
......
149 161
    /* buffer for temporaries in the code generator */                  \
150 162
    long temp_buf[CPU_TEMP_BUF_NLONGS];                                 \
151 163
                                                                        \
164
    int64_t icount_extra; /* Instructions until next timer event.  */   \
165
    /* Number of cycles left, with interrupt flag in high bit.          \
166
       This allows a single read-compare-cbranch-write sequence to test \
167
       for both decrementer underflow and exceptions.  */               \
168
    union {                                                             \
169
        uint32_t u32;                                                   \
170
        icount_decr_u16 u16;                                            \
171
    } icount_decr;                                                      \
172
    uint32_t can_do_io; /* nonzero if memory mapped IO is safe.  */     \
173
                                                                        \
152 174
    /* from this point: preserved by CPU reset */                       \
153 175
    /* ice debug support */                                             \
154 176
    target_ulong breakpoints[MAX_BREAKPOINTS];                          \
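The icount_decr union above packs the 16-bit instruction budget and a 16-bit flag half into one 32-bit word: storing 0x8000 into u16.high (as cpu_interrupt does later in this diff) sets the sign bit of u32, so the single read-compare-cbranch-write sequence mentioned in the comment catches both budget underflow and a pending interrupt request with one signed test. A minimal standalone sketch of that invariant, assuming a little-endian host (the WORDS_BIGENDIAN variant above swaps the halves):

    #include <stdint.h>

    /* Little-endian layout: u16.low aliases the low half of i32. */
    typedef union {
        int32_t i32;                    /* signed view; bit 31 doubles as the exit flag */
        struct { uint16_t low, high; } u16;
    } icount_decr_t;

    /* One signed comparison covers both exit conditions. */
    static int icount_take(icount_decr_t *d, uint16_t insns)
    {
        d->i32 -= insns;   /* consume some instruction budget */
        /* Negative if the 16-bit budget underflowed, or if 0x8000 was
           stored into u16.high to request an exit from the loop. */
        return d->i32 < 0;
    }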
b/cpu-exec.c
82 82
    longjmp(env->jmp_env, 1);
83 83
}
84 84

  
85
/* Execute the code without caching the generated code. An interpreter
86
   could be used if available. */
87
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
88
{
89
    unsigned long next_tb;
90
    TranslationBlock *tb;
91

  
92
    /* Should never happen.
93
       We only end up here when an existing TB is too long.  */
94
    if (max_cycles > CF_COUNT_MASK)
95
        max_cycles = CF_COUNT_MASK;
96

  
97
    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
98
                     max_cycles);
99
    env->current_tb = tb;
100
    /* execute the generated code */
101
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
102

  
103
    if ((next_tb & 3) == 2) {
104
        /* Restore PC.  This may happen if an async event occurs before
105
           the TB starts executing.  */
106
        CPU_PC_FROM_TB(env, tb);
107
    }
108
    tb_phys_invalidate(tb, -1);
109
    tb_free(tb);
110
}
111

  
85 112
static TranslationBlock *tb_find_slow(target_ulong pc,
86 113
                                      target_ulong cs_base,
87 114
                                      uint64_t flags)
88 115
{
89 116
    TranslationBlock *tb, **ptb1;
90
    int code_gen_size;
91 117
    unsigned int h;
92 118
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;
93
    uint8_t *tc_ptr;
94 119

  
95 120
    tb_invalidated_flag = 0;
96 121

  
......
124 149
        ptb1 = &tb->phys_hash_next;
125 150
    }
126 151
 not_found:
127
    /* if no translated code available, then translate it now */
128
    tb = tb_alloc(pc);
129
    if (!tb) {
130
        /* flush must be done */
131
        tb_flush(env);
132
        /* cannot fail at this point */
133
        tb = tb_alloc(pc);
134
        /* don't forget to invalidate previous TB info */
135
        tb_invalidated_flag = 1;
136
    }
137
    tc_ptr = code_gen_ptr;
138
    tb->tc_ptr = tc_ptr;
139
    tb->cs_base = cs_base;
140
    tb->flags = flags;
141
    cpu_gen_code(env, tb, &code_gen_size);
142
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
143

  
144
    /* check next page if needed */
145
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
146
    phys_page2 = -1;
147
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
148
        phys_page2 = get_phys_addr_code(env, virt_page2);
149
    }
150
    tb_link_phys(tb, phys_pc, phys_page2);
152
   /* if no translated code available, then translate it now */
153
    tb = tb_gen_code(env, pc, cs_base, flags, 0);
151 154

  
152 155
 found:
153 156
    /* we add the TB in the virtual pc hash table */
......
583 586
                       of memory exceptions while generating the code, we
584 587
                       must recompute the hash index here */
585 588
                    next_tb = 0;
589
                    tb_invalidated_flag = 0;
586 590
                }
587 591
#ifdef DEBUG_EXEC
588 592
                if ((loglevel & CPU_LOG_EXEC)) {
......
604 608
                }
605 609
                }
606 610
                spin_unlock(&tb_lock);
607
                tc_ptr = tb->tc_ptr;
608 611
                env->current_tb = tb;
612
                while (env->current_tb) {
613
                    tc_ptr = tb->tc_ptr;
609 614
                /* execute the generated code */
610 615
#if defined(__sparc__) && !defined(HOST_SOLARIS)
611 616
#undef env
612
                env = cpu_single_env;
617
                    env = cpu_single_env;
613 618
#define env cpu_single_env
614 619
#endif
615
                next_tb = tcg_qemu_tb_exec(tc_ptr);
616
                env->current_tb = NULL;
620
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
621
                    env->current_tb = NULL;
622
                    if ((next_tb & 3) == 2) {
623
                        /* Instruction counter expired.  */
624
                        int insns_left;
625
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
626
                        /* Restore PC.  */
627
                        CPU_PC_FROM_TB(env, tb);
628
                        insns_left = env->icount_decr.u32;
629
                        if (env->icount_extra && insns_left >= 0) {
630
                            /* Refill decrementer and continue execution.  */
631
                            env->icount_extra += insns_left;
632
                            if (env->icount_extra > 0xffff) {
633
                                insns_left = 0xffff;
634
                            } else {
635
                                insns_left = env->icount_extra;
636
                            }
637
                            env->icount_extra -= insns_left;
638
                            env->icount_decr.u16.low = insns_left;
639
                        } else {
640
                            if (insns_left > 0) {
641
                                /* Execute remaining instructions.  */
642
                                cpu_exec_nocache(insns_left, tb);
643
                            }
644
                            env->exception_index = EXCP_INTERRUPT;
645
                            next_tb = 0;
646
                            cpu_loop_exit();
647
                        }
648
                    }
649
                }
617 650
                /* reset soft MMU for next block (it can currently
618 651
                   only be set by a memory fault) */
619 652
#if defined(USE_KQEMU)
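The refill branch above exists because the decrementer is only 16 bits wide: the remaining budget until the next timer event is kept in icount_extra and fed to icount_decr.u16.low in chunks of at most 0xffff. A standalone restatement of that arithmetic (field names follow the diff; this is illustrative, not the QEMU code itself):

    #include <stdint.h>

    struct icount_state {
        int64_t  extra;  /* instructions beyond the 16-bit decrementer */
        uint16_t low;    /* current budget (icount_decr.u16.low) */
    };

    static void icount_refill(struct icount_state *s, int32_t insns_left)
    {
        s->extra += insns_left;        /* hand back the unused budget */
        if (s->extra > 0xffff)
            insns_left = 0xffff;       /* cap at decrementer width */
        else
            insns_left = s->extra;
        s->extra -= insns_left;        /* park the remainder */
        s->low = insns_left;           /* arm the decrementer */
    }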
b/exec-all.h
27 27
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
28 28
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
29 29

  
30
struct TranslationBlock;
30
typedef struct TranslationBlock TranslationBlock;
31 31

  
32 32
/* XXX: make safe guess about sizes */
33 33
#define MAX_OP_PER_INSTR 64
......
48 48
extern target_ulong gen_opc_npc[OPC_BUF_SIZE];
49 49
extern uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
50 50
extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
51
extern uint16_t gen_opc_icount[OPC_BUF_SIZE];
51 52
extern target_ulong gen_opc_jump_pc[2];
52 53
extern uint32_t gen_opc_hflags[OPC_BUF_SIZE];
53 54

  
......
75 76
                           CPUState *env, unsigned long searched_pc,
76 77
                           void *puc);
77 78
void cpu_resume_from_signal(CPUState *env1, void *puc);
79
void cpu_io_recompile(CPUState *env, void *retaddr);
80
TranslationBlock *tb_gen_code(CPUState *env, 
81
                              target_ulong pc, target_ulong cs_base, int flags,
82
                              int cflags);
78 83
void cpu_exec_init(CPUState *env);
79 84
int page_unprotect(target_ulong address, unsigned long pc, void *puc);
80 85
void tb_invalidate_phys_page_range(target_phys_addr_t start, target_phys_addr_t end,
......
117 122
#define USE_DIRECT_JUMP
118 123
#endif
119 124

  
120
typedef struct TranslationBlock {
125
struct TranslationBlock {
121 126
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
122 127
    target_ulong cs_base; /* CS base for this block */
123 128
    uint64_t flags; /* flags defining in which context the code was generated */
124 129
    uint16_t size;      /* size of target code for this block (1 <=
125 130
                           size <= TARGET_PAGE_SIZE) */
126 131
    uint16_t cflags;    /* compile flags */
127
#define CF_TB_FP_USED  0x0002 /* fp ops are used in the TB */
128
#define CF_FP_USED     0x0004 /* fp ops are used in the TB or in a chained TB */
129
#define CF_SINGLE_INSN 0x0008 /* compile only a single instruction */
132
#define CF_COUNT_MASK  0x7fff
133
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
130 134

  
131 135
    uint8_t *tc_ptr;    /* pointer to the translated code */
132 136
    /* next matching tb for physical address. */
......
150 154
       jmp_first */
151 155
    struct TranslationBlock *jmp_next[2];
152 156
    struct TranslationBlock *jmp_first;
153
} TranslationBlock;
157
    uint32_t icount;
158
};
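With CF_SINGLE_INSN and the other old flags gone, cflags now carries a 15-bit instruction budget (CF_COUNT_MASK) plus a marker saying the final instruction may perform device I/O (CF_LAST_IO); cpu_io_recompile later in this diff builds exactly this encoding with n | CF_LAST_IO. A small self-contained sketch of the packing:

    #include <stdint.h>

    #define CF_COUNT_MASK 0x7fff
    #define CF_LAST_IO    0x8000

    /* "Translate exactly n guest insns; the last one may do I/O." */
    static uint16_t make_cflags(uint32_t n)
    {
        if (n > CF_COUNT_MASK)  /* oversized blocks are clamped */
            n = CF_COUNT_MASK;
        return (uint16_t)(n | CF_LAST_IO);
    }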
154 159

  
155 160
static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
156 161
{
......
173 178
}
174 179

  
175 180
TranslationBlock *tb_alloc(target_ulong pc);
181
void tb_free(TranslationBlock *tb);
176 182
void tb_flush(CPUState *env);
177 183
void tb_link_phys(TranslationBlock *tb,
178 184
                  target_ulong phys_pc, target_ulong phys_page2);
185
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr);
179 186

  
180 187
extern TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
181 188
extern uint8_t *code_gen_ptr;
......
364 371
    }
365 372
    return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
366 373
}
374

  
375
/* Deterministic execution requires that IO only be performed on the last
376
   instruction of a TB so that interrupts take effect immediately.  */
377
static inline int can_do_io(CPUState *env)
378
{
379
    if (!use_icount)
380
        return 1;
381

  
382
    /* If not executing code then assume we are ok.  */
383
    if (!env->current_tb)
384
        return 1;
385

  
386
    return env->can_do_io != 0;
387
}
367 388
#endif
368 389

  
369 390
#ifdef USE_KQEMU
b/exec.c
107 107
/* current CPU in the current thread. It is only valid inside
108 108
   cpu_exec() */
109 109
CPUState *cpu_single_env;
110
/* 0 = Do not count executed instructions.
111
   1 = Precise instruction counting.
112
   2 = Adaptive rate instruction counting.  */
113
int use_icount = 0;
114
/* Current instruction counter.  While executing translated code this may
115
   include some instructions that have not yet been executed.  */
116
int64_t qemu_icount;
110 117

  
111 118
typedef struct PageDesc {
112 119
    /* list of TBs intersecting this ram page */
......
633 640
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
634 641
}
635 642

  
636
static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
643
void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
637 644
{
638 645
    CPUState *env;
639 646
    PageDesc *p;
......
746 753
    }
747 754
}
748 755

  
749
#ifdef TARGET_HAS_PRECISE_SMC
750

  
751
static void tb_gen_code(CPUState *env,
752
                        target_ulong pc, target_ulong cs_base, int flags,
753
                        int cflags)
756
TranslationBlock *tb_gen_code(CPUState *env,
757
                              target_ulong pc, target_ulong cs_base,
758
                              int flags, int cflags)
754 759
{
755 760
    TranslationBlock *tb;
756 761
    uint8_t *tc_ptr;
......
764 769
        tb_flush(env);
765 770
        /* cannot fail at this point */
766 771
        tb = tb_alloc(pc);
772
        /* Don't forget to invalidate previous TB info.  */
773
        tb_invalidated_flag = 1;
767 774
    }
768 775
    tc_ptr = code_gen_ptr;
769 776
    tb->tc_ptr = tc_ptr;
......
780 787
        phys_page2 = get_phys_addr_code(env, virt_page2);
781 788
    }
782 789
    tb_link_phys(tb, phys_pc, phys_page2);
790
    return tb;
783 791
}
784
#endif
785 792

  
786 793
/* invalidate all TBs which intersect with the target physical page
787 794
   starting in range [start;end[. NOTE: start and end must refer to
......
836 843
            if (current_tb_not_found) {
837 844
                current_tb_not_found = 0;
838 845
                current_tb = NULL;
839
                if (env->mem_write_pc) {
846
                if (env->mem_io_pc) {
840 847
                    /* now we have a real cpu fault */
841
                    current_tb = tb_find_pc(env->mem_write_pc);
848
                    current_tb = tb_find_pc(env->mem_io_pc);
842 849
                }
843 850
            }
844 851
            if (current_tb == tb &&
845
                !(current_tb->cflags & CF_SINGLE_INSN)) {
852
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
846 853
                /* If we are modifying the current TB, we must stop
847 854
                its execution. We could be more precise by checking
848 855
                that the modification is after the current PC, but it
......
851 858

  
852 859
                current_tb_modified = 1;
853 860
                cpu_restore_state(current_tb, env,
854
                                  env->mem_write_pc, NULL);
861
                                  env->mem_io_pc, NULL);
855 862
#if defined(TARGET_I386)
856 863
                current_flags = env->hflags;
857 864
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
......
883 890
    if (!p->first_tb) {
884 891
        invalidate_page_bitmap(p);
885 892
        if (is_cpu_write_access) {
886
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
893
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
887 894
        }
888 895
    }
889 896
#endif
......
893 900
           modifying the memory. It will ensure that it cannot modify
894 901
           itself */
895 902
        env->current_tb = NULL;
896
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
897
                    CF_SINGLE_INSN);
903
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
898 904
        cpu_resume_from_signal(env, NULL);
899 905
    }
900 906
#endif
......
909 915
    if (1) {
910 916
        if (loglevel) {
911 917
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
912
                   cpu_single_env->mem_write_vaddr, len,
918
                   cpu_single_env->mem_io_vaddr, len,
913 919
                   cpu_single_env->eip,
914 920
                   cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
915 921
        }
......
961 967
        tb = (TranslationBlock *)((long)tb & ~3);
962 968
#ifdef TARGET_HAS_PRECISE_SMC
963 969
        if (current_tb == tb &&
964
            !(current_tb->cflags & CF_SINGLE_INSN)) {
970
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
965 971
                /* If we are modifying the current TB, we must stop
966 972
                   its execution. We could be more precise by checking
967 973
                   that the modification is after the current PC, but it
......
990 996
           modifying the memory. It will ensure that it cannot modify
991 997
           itself */
992 998
        env->current_tb = NULL;
993
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
994
                    CF_SINGLE_INSN);
999
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
995 1000
        cpu_resume_from_signal(env, puc);
996 1001
    }
997 1002
#endif
......
1068 1073
    return tb;
1069 1074
}
1070 1075

  
1076
void tb_free(TranslationBlock *tb)
1077
{
1078
    /* In practice this is mostly used for single-use temporary TBs.
1079
       Ignore the hard cases and just back up if this TB happens to
1080
       be the last one generated.  */
1081
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
1082
        code_gen_ptr = tb->tc_ptr;
1083
        nb_tbs--;
1084
    }
1085
}
1086

  
1071 1087
/* add a new TB and link it to the physical page tables. phys_page2 is
1072 1088
   (-1) to indicate that only one page contains the TB. */
1073 1089
void tb_link_phys(TranslationBlock *tb,
......
1369 1385
    TranslationBlock *tb;
1370 1386
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
1371 1387
#endif
1388
    int old_mask;
1372 1389

  
1390
    old_mask = env->interrupt_request;
1373 1391
    /* FIXME: This is probably not threadsafe.  A different thread could
1374 1392
       be in the middle of a read-modify-write operation.  */
1375 1393
    env->interrupt_request |= mask;
......
1379 1397
       emulation this often isn't actually as bad as it sounds.  Often
1380 1398
       signals are used primarily to interrupt blocking syscalls.  */
1381 1399
#else
1382
    /* if the cpu is currently executing code, we must unlink it and
1383
       all the potentially executing TBs */
1384
    tb = env->current_tb;
1385
    if (tb && !testandset(&interrupt_lock)) {
1386
        env->current_tb = NULL;
1387
        tb_reset_jump_recursive(tb);
1388
        resetlock(&interrupt_lock);
1400
    if (use_icount) {
1401
        env->icount_decr.u16.high = 0x8000;
1402
#ifndef CONFIG_USER_ONLY
1403
        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
1404
           an async event happened and we need to process it.  */
1405
        if (!can_do_io(env)
1406
            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
1407
            cpu_abort(env, "Raised interrupt while not in I/O function");
1408
        }
1409
#endif
1410
    } else {
1411
        tb = env->current_tb;
1412
        /* if the cpu is currently executing code, we must unlink it and
1413
           all the potentially executing TBs */
1414
        if (tb && !testandset(&interrupt_lock)) {
1415
            env->current_tb = NULL;
1416
            tb_reset_jump_recursive(tb);
1417
            resetlock(&interrupt_lock);
1418
        }
1389 1419
    }
1390 1420
#endif
1391 1421
}
......
2227 2257
    /* we remove the notdirty callback only if the code has been
2228 2258
       flushed */
2229 2259
    if (dirty_flags == 0xff)
2230
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2260
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2231 2261
}
2232 2262

  
2233 2263
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
......
2252 2282
    /* we remove the notdirty callback only if the code has been
2253 2283
       flushed */
2254 2284
    if (dirty_flags == 0xff)
2255
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2285
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2256 2286
}
2257 2287

  
2258 2288
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
......
2277 2307
    /* we remove the notdirty callback only if the code has been
2278 2308
       flushed */
2279 2309
    if (dirty_flags == 0xff)
2280
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
2310
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
2281 2311
}
2282 2312

  
2283 2313
static CPUReadMemoryFunc *error_mem_read[3] = {
......
2299 2329
    target_ulong vaddr;
2300 2330
    int i;
2301 2331

  
2302
    vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
2332
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
2303 2333
    for (i = 0; i < env->nb_watchpoints; i++) {
2304 2334
        if (vaddr == env->watchpoint[i].vaddr
2305 2335
                && (env->watchpoint[i].type & flags)) {
......
2967 2997
    return 0;
2968 2998
}
2969 2999

  
3000
/* in deterministic execution mode, instructions doing device I/Os
3001
   must be at the end of the TB */
3002
void cpu_io_recompile(CPUState *env, void *retaddr)
3003
{
3004
    TranslationBlock *tb;
3005
    uint32_t n, cflags;
3006
    target_ulong pc, cs_base;
3007
    uint64_t flags;
3008

  
3009
    tb = tb_find_pc((unsigned long)retaddr);
3010
    if (!tb) {
3011
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p", 
3012
                  retaddr);
3013
    }
3014
    n = env->icount_decr.u16.low + tb->icount;
3015
    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
3016
    /* Calculate how many instructions had been executed before the fault
3017
       occurred.  */
3018
    n = n - env->icount_decr.u16.low;
3019
    /* Generate a new TB ending on the I/O insn.  */
3020
    n++;
3021
    /* On MIPS and SH, delay slot instructions can only be restarted if
3022
       they were already the first instruction in the TB.  If this is not
3023
       the first instruction in a TB then re-execute the preceding
3024
       branch.  */
3025
#if defined(TARGET_MIPS)
3026
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
3027
        env->active_tc.PC -= 4;
3028
        env->icount_decr.u16.low++;
3029
        env->hflags &= ~MIPS_HFLAG_BMASK;
3030
    }
3031
#elif defined(TARGET_SH4)
3032
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
3033
            && n > 1) {
3034
        env->pc -= 2;
3035
        env->icount_decr.u16.low++;
3036
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
3037
    }
3038
#endif
3039
    /* This should never happen.  */
3040
    if (n > CF_COUNT_MASK)
3041
        cpu_abort(env, "TB too big during recompile");
3042

  
3043
    cflags = n | CF_LAST_IO;
3044
    pc = tb->pc;
3045
    cs_base = tb->cs_base;
3046
    flags = tb->flags;
3047
    tb_phys_invalidate(tb, -1);
3048
    /* FIXME: In theory this could raise an exception.  In practice
3049
       we have already translated the block once so it's probably ok.  */
3050
    tb_gen_code(env, pc, cs_base, flags, cflags);
3051
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
3052
       the first in the TB) then we end up generating a whole new TB and
3053
       repeating the fault, which is horribly inefficient.
3054
       Better would be to execute just this insn uncached, or generate a
3055
       second new TB.  */
3056
    cpu_resume_from_signal(env, NULL);
3057
}
3058

  
2970 3059
void dump_exec_info(FILE *f,
2971 3060
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
2972 3061
{
b/hw/mips_timer.c
91 91
    if (env->CP0_Cause & (1 << CP0Ca_DC))
92 92
        return;
93 93

  
94
    /* ??? This callback should occur when the counter is exactly equal to
95
       the comparator value.  Offset the count by one to avoid immediately
96
       retriggering the callback before any virtual time has passed.  */
97
    env->CP0_Count++;
94 98
    cpu_mips_timer_update(env);
99
    env->CP0_Count--;
95 100
    if (env->insn_flags & ISA_MIPS32R2)
96 101
        env->CP0_Cause |= 1 << CP0Ca_TI;
97 102
    qemu_irq_raise(env->irq[(env->CP0_IntCtl >> CP0IntCtl_IPTI) & 0x7]);
b/qemu-doc.texi
965 965

  
966 966
Note that this allows guest direct access to the host filesystem,
967 967
so it should only be used with a trusted guest OS.
968

  
969
@item -icount [N|auto]
970
Enable virtual instruction counter.  The virtual cpu will execute one
971
instruction every 2^N ns of virtual time.  If @code{auto} is specified
972
then the virtual cpu speed will be automatically adjusted to keep virtual
973
time within a few seconds of real time.
974

  
975
Note that while this option can give deterministic behavior, it does not
976
provide cycle-accurate emulation.  Modern CPUs contain superscalar
977
out-of-order cores with complex cache hierarchies.  The number of instructions
978
executed often has little or no correlation with actual performance.
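For example (a sketch: the machine and kernel image are placeholders; only
the @option{-icount} argument comes from this patch):

@example
qemu-system-mips -icount 2 -kernel vmlinux.bin
@end example

With @code{-icount 2} each guest instruction accounts for 2^2 = 4 ns of
virtual time; with @code{-icount auto} the shift is adjusted on the fly to
keep virtual time close to real time.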
968 979
@end table
969 980

  
970 981
@c man end
b/softmmu_template.h
51 51
                                                        int mmu_idx,
52 52
                                                        void *retaddr);
53 53
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
54
                                              target_ulong addr)
54
                                              target_ulong addr,
55
                                              void *retaddr)
55 56
{
56 57
    DATA_TYPE res;
57 58
    int index;
58 59
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
59 60
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
61
    env->mem_io_pc = (unsigned long)retaddr;
62
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
63
            && !can_do_io(env)) {
64
        cpu_io_recompile(env, retaddr);
65
    }
60 66

  
61 67
#if SHIFT <= 2
62 68
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
......
95 101
            /* IO access */
96 102
            if ((addr & (DATA_SIZE - 1)) != 0)
97 103
                goto do_unaligned_access;
104
            retaddr = GETPC();
98 105
            addend = env->iotlb[mmu_idx][index];
99
            res = glue(io_read, SUFFIX)(addend, addr);
106
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
100 107
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
101 108
            /* slow unaligned access (it spans two pages or IO) */
102 109
        do_unaligned_access:
......
148 155
            /* IO access */
149 156
            if ((addr & (DATA_SIZE - 1)) != 0)
150 157
                goto do_unaligned_access;
158
            retaddr = GETPC();
151 159
            addend = env->iotlb[mmu_idx][index];
152
            res = glue(io_read, SUFFIX)(addend, addr);
160
            res = glue(io_read, SUFFIX)(addend, addr, retaddr);
153 161
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
154 162
        do_unaligned_access:
155 163
            /* slow unaligned access (it spans two pages) */
......
194 202
    int index;
195 203
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
196 204
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
205
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
206
            && !can_do_io(env)) {
207
        cpu_io_recompile(env, retaddr);
208
    }
197 209

  
198
    env->mem_write_vaddr = addr;
199
    env->mem_write_pc = (unsigned long)retaddr;
210
    env->mem_io_vaddr = addr;
211
    env->mem_io_pc = (unsigned long)retaddr;
200 212
#if SHIFT <= 2
201 213
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
202 214
#else
b/target-alpha/cpu.h
415 415
void pal_init (CPUState *env);
416 416
void call_pal (CPUState *env, int palcode);
417 417

  
418
#define CPU_PC_FROM_TB(env, tb) env->pc = tb->pc
419

  
418 420
#endif /* !defined (__CPU_ALPHA_H__) */
b/target-alpha/translate.c
43 43
    uint32_t amask;
44 44
};
45 45

  
46
TCGv cpu_env;
47

  
48
#include "gen-icount.h"
49

  
50
void alpha_translate_init()
51
{
52
    static int done_init = 0;
53
    if (done_init)
54
        return;
55
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
56
    done_init = 1;
57
}
58

  
46 59
static always_inline void gen_op_nop (void)
47 60
{
48 61
#if defined(GENERATE_NOP)
......
1970 1983
    uint16_t *gen_opc_end;
1971 1984
    int j, lj = -1;
1972 1985
    int ret;
1986
    int num_insns;
1987
    int max_insns;
1973 1988

  
1974 1989
    pc_start = tb->pc;
1975 1990
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
......
1981 1996
    ctx.mem_idx = ((env->ps >> 3) & 3);
1982 1997
    ctx.pal_mode = env->ipr[IPR_EXC_ADDR] & 1;
1983 1998
#endif
1999
    num_insns = 0;
2000
    max_insns = tb->cflags & CF_COUNT_MASK;
2001
    if (max_insns == 0)
2002
        max_insns = CF_COUNT_MASK;
2003

  
2004
    gen_icount_start();
1984 2005
    for (ret = 0; ret == 0;) {
1985 2006
        if (env->nb_breakpoints > 0) {
1986 2007
            for(j = 0; j < env->nb_breakpoints; j++) {
......
1998 2019
                    gen_opc_instr_start[lj++] = 0;
1999 2020
                gen_opc_pc[lj] = ctx.pc;
2000 2021
                gen_opc_instr_start[lj] = 1;
2022
                gen_opc_icount[lj] = num_insns;
2001 2023
            }
2002 2024
        }
2025
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
2026
            gen_io_start();
2003 2027
#if defined ALPHA_DEBUG_DISAS
2004 2028
        insn_count++;
2005 2029
        if (logfile != NULL) {
......
2014 2038
            fprintf(logfile, "opcode %08x %d\n", insn, insn_count);
2015 2039
        }
2016 2040
#endif
2041
        num_insns++;
2017 2042
        ctx.pc += 4;
2018 2043
        ret = translate_one(ctxp, insn);
2019 2044
        if (ret != 0)
......
2022 2047
         * generation
2023 2048
         */
2024 2049
        if (((ctx.pc & (TARGET_PAGE_SIZE - 1)) == 0) ||
2025
            (env->singlestep_enabled)) {
2050
            (env->singlestep_enabled) ||
2051
            num_insns >= max_insns) {
2026 2052
            break;
2027 2053
        }
2028 2054
#if defined (DO_SINGLE_STEP)
......
2035 2061
#if defined (DO_TB_FLUSH)
2036 2062
    gen_op_tb_flush();
2037 2063
#endif
2064
    if (tb->cflags & CF_LAST_IO)
2065
        gen_io_end();
2038 2066
    /* Generate the return instruction */
2039 2067
    tcg_gen_exit_tb(0);
2068
    gen_icount_end(tb, num_insns);
2040 2069
    *gen_opc_ptr = INDEX_op_end;
2041 2070
    if (search_pc) {
2042 2071
        j = gen_opc_ptr - gen_opc_buf;
......
2045 2074
            gen_opc_instr_start[lj++] = 0;
2046 2075
    } else {
2047 2076
        tb->size = ctx.pc - pc_start;
2077
        tb->icount = num_insns;
2048 2078
    }
2049 2079
#if defined ALPHA_DEBUG_DISAS
2050 2080
    if (loglevel & CPU_LOG_TB_CPU) {
......
2079 2109
    if (!env)
2080 2110
        return NULL;
2081 2111
    cpu_exec_init(env);
2112
    alpha_translate_init();
2082 2113
    tlb_flush(env, 1);
2083 2114
    /* XXX: should not be hardcoded */
2084 2115
    env->implver = IMPLVER_2106x;
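The same scaffolding recurs, with only cosmetic differences, in every target translator touched below (arm, cris, i386, m68k, mips, and the rest of the truncated diff). A condensed, compilable skeleton of the pattern; the gen_* functions here are stubs standing in for the real generators from gen-icount.h, which this diff does not show:

    #include <stdint.h>

    #define CF_COUNT_MASK 0x7fff
    #define CF_LAST_IO    0x8000

    struct TB { uint16_t cflags; uint32_t icount; };

    static void gen_icount_start(void) {}                  /* stub */
    static void gen_icount_end(struct TB *tb, int n) { (void)tb; (void)n; }
    static void gen_io_start(void) {}                      /* stub */
    static void gen_io_end(void) {}                        /* stub */
    static int  translate_one_insn(void) { return 1; }     /* stub: 1 = block ends */

    static void translate_block(struct TB *tb)
    {
        int num_insns = 0;
        int max_insns = tb->cflags & CF_COUNT_MASK;
        if (max_insns == 0)
            max_insns = CF_COUNT_MASK;   /* no explicit budget given */

        gen_icount_start();              /* emit the decrement-and-test prologue */
        for (;;) {
            if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
                gen_io_start();          /* only the final insn may do I/O */
            int done = translate_one_insn();
            num_insns++;
            if (done || num_insns >= max_insns)
                break;
        }
        if (tb->cflags & CF_LAST_IO)
            gen_io_end();
        gen_icount_end(tb, num_insns);   /* patch the real count into the prologue */
        tb->icount = num_insns;          /* recorded for cpu_io_recompile */
    }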
b/target-arm/cpu.h
417 417
}
418 418
#endif
419 419

  
420
#define CPU_PC_FROM_TB(env, tb) env->regs[15] = tb->pc
421

  
420 422
#include "cpu-all.h"
421 423

  
422 424
#endif
b/target-arm/translate.c
84 84
static TCGv cpu_T[2];
85 85
static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
86 86

  
87
#define ICOUNT_TEMP cpu_T[0]
88
#include "gen-icount.h"
89

  
87 90
/* initialize TCG globals.  */
88 91
void arm_translate_init(void)
89 92
{
......
8539 8542
    int j, lj;
8540 8543
    target_ulong pc_start;
8541 8544
    uint32_t next_page_start;
8545
    int num_insns;
8546
    int max_insns;
8542 8547

  
8543 8548
    /* generate intermediate code */
8544 8549
    num_temps = 0;
......
8575 8580
    cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
8576 8581
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
8577 8582
    lj = -1;
8583
    num_insns = 0;
8584
    max_insns = tb->cflags & CF_COUNT_MASK;
8585
    if (max_insns == 0)
8586
        max_insns = CF_COUNT_MASK;
8587

  
8588
    gen_icount_start();
8578 8589
    /* Reset the conditional execution bits immediately. This avoids
8579 8590
       complications trying to do it at the end of the block.  */
8580 8591
    if (env->condexec_bits)
......
8625 8636
            }
8626 8637
            gen_opc_pc[lj] = dc->pc;
8627 8638
            gen_opc_instr_start[lj] = 1;
8639
            gen_opc_icount[lj] = num_insns;
8628 8640
        }
8629 8641

  
8642
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8643
            gen_io_start();
8644

  
8630 8645
        if (env->thumb) {
8631 8646
            disas_thumb_insn(env, dc);
8632 8647
            if (dc->condexec_mask) {
......
8659 8674
         * Otherwise the subsequent code could get translated several times.
8660 8675
         * Also stop translation when a page boundary is reached.  This
8661 8676
         * ensures prefetch aborts occur at the right place.  */
8677
        num_insns ++;
8662 8678
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8663 8679
             !env->singlestep_enabled &&
8664
             dc->pc < next_page_start);
8680
             dc->pc < next_page_start &&
8681
             num_insns < max_insns);
8682

  
8683
    if (tb->cflags & CF_LAST_IO) {
8684
        if (dc->condjmp) {
8685
            /* FIXME:  This can theoretically happen with self-modifying
8686
               code.  */
8687
            cpu_abort(env, "IO on conditional branch instruction");
8688
        }
8689
        gen_io_end();
8690
    }
8665 8691

  
8666 8692
    /* At this stage dc->condjmp will only be set when the skipped
8667 8693
       instruction was a conditional branch or trap, and the PC has
......
8726 8752
            dc->condjmp = 0;
8727 8753
        }
8728 8754
    }
8755

  
8729 8756
done_generating:
8757
    gen_icount_end(tb, num_insns);
8730 8758
    *gen_opc_ptr = INDEX_op_end;
8731 8759

  
8732 8760
#ifdef DEBUG_DISAS
......
8744 8772
            gen_opc_instr_start[lj++] = 0;
8745 8773
    } else {
8746 8774
        tb->size = dc->pc - pc_start;
8775
        tb->icount = num_insns;
8747 8776
    }
8748 8777
    return 0;
8749 8778
}
b/target-cris/cpu.h
238 238
#define SFR_RW_MM_TLB_LO   env->pregs[PR_SRS]][5
239 239
#define SFR_RW_MM_TLB_HI   env->pregs[PR_SRS]][6
240 240

  
241
#define CPU_PC_FROM_TB(env, tb) env->pc = tb->pc
242

  
241 243
#include "cpu-all.h"
242 244
#endif
b/target-cris/translate.c
77 77
TCGv env_btarget;
78 78
TCGv env_pc;
79 79

  
80
#include "gen-icount.h"
81

  
80 82
/* This is the state at translation time.  */
81 83
typedef struct DisasContext {
82 84
	CPUState *env;
......
3032 3034
	struct DisasContext *dc = &ctx;
3033 3035
	uint32_t next_page_start;
3034 3036
	target_ulong npc;
3037
        int num_insns;
3038
        int max_insns;
3035 3039

  
3036 3040
	if (!logfile)
3037 3041
		logfile = stderr;
......
3092 3096

  
3093 3097
	next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
3094 3098
	lj = -1;
3099
        num_insns = 0;
3100
        max_insns = tb->cflags & CF_COUNT_MASK;
3101
        if (max_insns == 0)
3102
            max_insns = CF_COUNT_MASK;
3103

  
3104
        gen_icount_start();
3095 3105
	do
3096 3106
	{
3097 3107
		check_breakpoint(env, dc);
......
3108 3118
			else
3109 3119
				gen_opc_pc[lj] = dc->pc;
3110 3120
			gen_opc_instr_start[lj] = 1;
3121
                        gen_opc_icount[lj] = num_insns;
3111 3122
		}
3112 3123

  
3113 3124
		/* Pretty disas.  */
......
3116 3127
			DIS(fprintf(logfile, "%x ", dc->pc));
3117 3128
		}
3118 3129

  
3130
                if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
3131
                    gen_io_start();
3119 3132
		dc->clear_x = 1;
3120 3133
		if (unlikely(loglevel & CPU_LOG_TB_OP))
3121 3134
			tcg_gen_debug_insn_start(dc->pc);
......
3125 3138
		if (dc->clear_x)
3126 3139
			cris_clear_x_flag(dc);
3127 3140

  
3141
                num_insns++;
3128 3142
		/* Check for delayed branches here. If we do it before
3129 3143
		   actually generating any host code, the simulator will just
3130 3144
		   loop doing nothing at this program location.  */
......
3151 3165
		if (!(tb->pc & 1) && env->singlestep_enabled)
3152 3166
			break;
3153 3167
	} while (!dc->is_jmp && gen_opc_ptr < gen_opc_end
3154
		 && (dc->pc < next_page_start));
3168
		 && (dc->pc < next_page_start)
3169
                 && num_insns < max_insns);
3155 3170

  
3156 3171
	npc = dc->pc;
3157 3172
	if (dc->jmp == JMP_DIRECT && !dc->delayed_branch)
3158 3173
		npc = dc->jmp_pc;
3159 3174

  
3175
        if (tb->cflags & CF_LAST_IO)
3176
            gen_io_end();
3160 3177
	/* Force an update if the per-tb cpu state has changed.  */
3161 3178
	if (dc->is_jmp == DISAS_NEXT
3162 3179
	    && (dc->cpustate_changed || !dc->flagx_known 
......
3194 3211
				break;
3195 3212
		}
3196 3213
	}
3214
        gen_icount_end(tb, num_insns);
3197 3215
	*gen_opc_ptr = INDEX_op_end;
3198 3216
	if (search_pc) {
3199 3217
		j = gen_opc_ptr - gen_opc_buf;
......
3202 3220
			gen_opc_instr_start[lj++] = 0;
3203 3221
	} else {
3204 3222
		tb->size = dc->pc - pc_start;
3223
                tb->icount = num_insns;
3205 3224
	}
3206 3225

  
3207 3226
#ifdef DEBUG_DISAS
b/target-i386/cpu.h
753 753
}
754 754
#endif
755 755

  
756
#define CPU_PC_FROM_TB(env, tb) env->eip = tb->pc - tb->cs_base
757

  
756 758
#include "cpu-all.h"
757 759

  
758 760
#include "svm.h"
b/target-i386/translate.c
65 65
static TCGv cpu_tmp0, cpu_tmp1_i64, cpu_tmp2_i32, cpu_tmp3_i32, cpu_tmp4, cpu_ptr0, cpu_ptr1;
66 66
static TCGv cpu_tmp5, cpu_tmp6;
67 67

  
68
#include "gen-icount.h"
69

  
68 70
#ifdef TARGET_X86_64
69 71
static int x86_64_hregs;
70 72
#endif
......
1203 1205

  
1204 1206
static inline void gen_ins(DisasContext *s, int ot)
1205 1207
{
1208
    if (use_icount)
1209
        gen_io_start();
1206 1210
    gen_string_movl_A0_EDI(s);
1207 1211
    /* Note: we must do this dummy write first to be restartable in
1208 1212
       case of page fault. */
......
1215 1219
    gen_op_st_T0_A0(ot + s->mem_index);
1216 1220
    gen_op_movl_T0_Dshift(ot);
1217 1221
    gen_op_add_reg_T0(s->aflag, R_EDI);
1222
    if (use_icount)
1223
        gen_io_end();
1218 1224
}
1219 1225

  
1220 1226
static inline void gen_outs(DisasContext *s, int ot)
1221 1227
{
1228
    if (use_icount)
1229
        gen_io_start();
1222 1230
    gen_string_movl_A0_ESI(s);
1223 1231
    gen_op_ld_T0_A0(ot + s->mem_index);
1224 1232

  
......
1230 1238

  
1231 1239
    gen_op_movl_T0_Dshift(ot);
1232 1240
    gen_op_add_reg_T0(s->aflag, R_ESI);
1241
    if (use_icount)
1242
        gen_io_end();
1233 1243
}
1234 1244

  
1235 1245
/* same method as Valgrind : we generate jumps to current or next
......
5570 5580
            gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5571 5581
        } else {
5572 5582
            gen_ins(s, ot);
5583
            if (use_icount) {
5584
                gen_jmp(s, s->pc - s->cs_base);
5585
            }
5573 5586
        }
5574 5587
        break;
5575 5588
    case 0x6e: /* outsS */
......
5586 5599
            gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5587 5600
        } else {
5588 5601
            gen_outs(s, ot);
5602
            if (use_icount) {
5603
                gen_jmp(s, s->pc - s->cs_base);
5604
            }
5589 5605
        }
5590 5606
        break;
5591 5607

  
......
5602 5618
        gen_op_movl_T0_im(val);
5603 5619
        gen_check_io(s, ot, pc_start - s->cs_base,
5604 5620
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
5621
        if (use_icount)
5622
            gen_io_start();
5605 5623
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5606 5624
        tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32);
5607 5625
        gen_op_mov_reg_T1(ot, R_EAX);
5626
        if (use_icount) {
5627
            gen_io_end();
5628
            gen_jmp(s, s->pc - s->cs_base);
5629
        }
5608 5630
        break;
5609 5631
    case 0xe6:
5610 5632
    case 0xe7:
......
5618 5640
                     svm_is_rep(prefixes));
5619 5641
        gen_op_mov_TN_reg(ot, 1, R_EAX);
5620 5642

  
5643
        if (use_icount)
5644
            gen_io_start();
5621 5645
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5622 5646
        tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
5623 5647
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5624 5648
        tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
5649
        if (use_icount) {
5650
            gen_io_end();
5651
            gen_jmp(s, s->pc - s->cs_base);
5652
        }
5625 5653
        break;
5626 5654
    case 0xec:
5627 5655
    case 0xed:
......
5633 5661
        gen_op_andl_T0_ffff();
5634 5662
        gen_check_io(s, ot, pc_start - s->cs_base,
5635 5663
                     SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
5664
        if (use_icount)
5665
            gen_io_start();
5636 5666
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5637 5667
        tcg_gen_helper_1_1(helper_in_func[ot], cpu_T[1], cpu_tmp2_i32);
5638 5668
        gen_op_mov_reg_T1(ot, R_EAX);
5669
        if (use_icount) {
5670
            gen_io_end();
5671
            gen_jmp(s, s->pc - s->cs_base);
5672
        }
5639 5673
        break;
5640 5674
    case 0xee:
5641 5675
    case 0xef:
......
5649 5683
                     svm_is_rep(prefixes));
5650 5684
        gen_op_mov_TN_reg(ot, 1, R_EAX);
5651 5685

  
5686
        if (use_icount)
5687
            gen_io_start();
5652 5688
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5653 5689
        tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
5654 5690
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5655 5691
        tcg_gen_helper_0_2(helper_out_func[ot], cpu_tmp2_i32, cpu_tmp3_i32);
5692
        if (use_icount) {
5693
            gen_io_end();
5694
            gen_jmp(s, s->pc - s->cs_base);
5695
        }
5656 5696
        break;
5657 5697

  
5658 5698
        /************************/
......
7109 7149
    uint64_t flags;
7110 7150
    target_ulong pc_start;
7111 7151
    target_ulong cs_base;
7152
    int num_insns;
7153
    int max_insns;
7112 7154

  
7113 7155
    /* generate intermediate code */
7114 7156
    pc_start = tb->pc;
......
7179 7221
    dc->is_jmp = DISAS_NEXT;
7180 7222
    pc_ptr = pc_start;
7181 7223
    lj = -1;
7224
    num_insns = 0;
7225
    max_insns = tb->cflags & CF_COUNT_MASK;
7226
    if (max_insns == 0)
7227
        max_insns = CF_COUNT_MASK;
7182 7228

  
7229
    gen_icount_start();
7183 7230
    for(;;) {
7184 7231
        if (env->nb_breakpoints > 0) {
7185 7232
            for(j = 0; j < env->nb_breakpoints; j++) {
......
7199 7246
            gen_opc_pc[lj] = pc_ptr;
7200 7247
            gen_opc_cc_op[lj] = dc->cc_op;
7201 7248
            gen_opc_instr_start[lj] = 1;
7249
            gen_opc_icount[lj] = num_insns;
7202 7250
        }
7251
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7252
            gen_io_start();
7253

  
7203 7254
        pc_ptr = disas_insn(dc, pc_ptr);
7255
        num_insns++;
7204 7256
        /* stop translation if indicated */
7205 7257
        if (dc->is_jmp)
7206 7258
            break;
......
7210 7262
           the flag and abort the translation to give the irqs a
7211 7263
           chance to happen */
7212 7264
        if (dc->tf || dc->singlestep_enabled ||
7213
            (flags & HF_INHIBIT_IRQ_MASK) ||
7214
            (cflags & CF_SINGLE_INSN)) {
7265
            (flags & HF_INHIBIT_IRQ_MASK)) {
7215 7266
            gen_jmp_im(pc_ptr - dc->cs_base);
7216 7267
            gen_eob(dc);
7217 7268
            break;
7218 7269
        }
7219 7270
        /* likewise stop generation if the translation becomes too long */
7220 7271
        if (gen_opc_ptr >= gen_opc_end ||
7221
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32)) {
7272
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7273
            num_insns >= max_insns) {
7222 7274
            gen_jmp_im(pc_ptr - dc->cs_base);
7223 7275
            gen_eob(dc);
7224 7276
            break;
7225 7277
        }
7226 7278
    }
7279
    if (tb->cflags & CF_LAST_IO)
7280
        gen_io_end();
7281
    gen_icount_end(tb, num_insns);
7227 7282
    *gen_opc_ptr = INDEX_op_end;
7228 7283
    /* we don't forget to fill the last values */
7229 7284
    if (search_pc) {
......
7252 7307
    }
7253 7308
#endif
7254 7309

  
7255
    if (!search_pc)
7310
    if (!search_pc) {
7256 7311
        tb->size = pc_ptr - pc_start;
7312
        tb->icount = num_insns;
7313
    }
7257 7314
    return 0;
7258 7315
}
7259 7316

  
b/target-m68k/cpu.h
235 235
}
236 236
#endif
237 237

  
238
#define CPU_PC_FROM_TB(env, tb) env->pc = tb->pc
239

  
238 240
#include "cpu-all.h"
239 241

  
240 242
#endif
b/target-m68k/translate.c
63 63
/* Used to distinguish stores from bad addressing modes.  */
64 64
static TCGv store_dummy;
65 65

  
66
#include "gen-icount.h"
67

  
66 68
void m68k_tcg_init(void)
67 69
{
68 70
    char *p;
......
2919 2921
    target_ulong pc_start;
2920 2922
    int pc_offset;
2921 2923
    int last_cc_op;
2924
    int num_insns;
2925
    int max_insns;
2922 2926

  
2923 2927
    /* generate intermediate code */
2924 2928
    pc_start = tb->pc;
......
2937 2941
    dc->is_mem = 0;
2938 2942
    dc->mactmp = NULL_QREG;
2939 2943
    lj = -1;
2944
    num_insns = 0;
2945
    max_insns = tb->cflags & CF_COUNT_MASK;
2946
    if (max_insns == 0)
2947
        max_insns = CF_COUNT_MASK;
2948

  
2949
    gen_icount_start();
2940 2950
    do {
2941 2951
        pc_offset = dc->pc - pc_start;
2942 2952
        gen_throws_exception = NULL;
......
2960 2970
            }
2961 2971
            gen_opc_pc[lj] = dc->pc;
2962 2972
            gen_opc_instr_start[lj] = 1;
2973
            gen_opc_icount[lj] = num_insns;
2963 2974
        }
2975
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
2976
            gen_io_start();
2964 2977
        last_cc_op = dc->cc_op;
2965 2978
        dc->insn_pc = dc->pc;
2966 2979
	disas_m68k_insn(env, dc);
2980
        num_insns++;
2967 2981

  
2968 2982
        /* Terminate the TB on memory ops if watchpoints are present.  */
2969 2983
        /* FIXME: This should be replaced by the deterministic execution
......
2972 2986
            break;
2973 2987
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2974 2988
             !env->singlestep_enabled &&
2975
             (pc_offset) < (TARGET_PAGE_SIZE - 32));
2989
             (pc_offset) < (TARGET_PAGE_SIZE - 32) &&
2990
             num_insns < max_insns);
2976 2991

  
2992
    if (tb->cflags & CF_LAST_IO)
2993
        gen_io_end();
2977 2994
    if (__builtin_expect(env->singlestep_enabled, 0)) {
2978 2995
        /* Make sure the pc is updated, and raise a debug exception.  */
2979 2996
        if (!dc->is_jmp) {
......
2999 3016
            break;
3000 3017
        }
3001 3018
    }
3019
    gen_icount_end(tb, num_insns);
3002 3020
    *gen_opc_ptr = INDEX_op_end;
3003 3021

  
3004 3022
#ifdef DEBUG_DISAS
......
3016 3034
            gen_opc_instr_start[lj++] = 0;
3017 3035
    } else {
3018 3036
        tb->size = dc->pc - pc_start;
3037
        tb->icount = num_insns;
3019 3038
    }
3020 3039

  
3021 3040
    //optimize_flags();
b/target-mips/cpu.h
572 572
uint32_t cpu_mips_get_clock (void);
573 573
int cpu_mips_signal_handler(int host_signum, void *pinfo, void *puc);
574 574

  
575
#define CPU_PC_FROM_TB(env, tb) do { \
576
    env->active_tc.PC = tb->pc; \
577
    env->hflags &= ~MIPS_HFLAG_BMASK; \
578
    env->hflags |= tb->flags & MIPS_HFLAG_BMASK; \
579
    } while (0)
580

  
575 581
#endif /* !defined (__MIPS_CPU_H__) */
b/target-mips/translate.c
428 428
/* FPU TNs, global for now. */
429 429
static TCGv fpu32_T[3], fpu64_T[3], fpu32h_T[3];
430 430

  
431
#include "gen-icount.h"
432

  
431 433
static inline void tcg_gen_helper_0_i(void *func, TCGv arg)
432 434
{
433 435
    TCGv tmp = tcg_const_i32(arg);
......
3061 3063
    case 9:
3062 3064
        switch (sel) {
3063 3065
        case 0:
3066
            /* Mark as an IO operation because we read the time.  */
3067
            if (use_icount)
3068
                gen_io_start();
3064 3069
            tcg_gen_helper_1_0(do_mfc0_count, t0);
3070
            if (use_icount) {
3071
                gen_io_end();
3072
                ctx->bstate = BS_STOP;
3073
            }
3065 3074
            rn = "Count";
3066 3075
            break;
3067 3076
        /* 6,7 are implementation dependent */
......
3422 3431
    if (sel != 0)
3423 3432
        check_insn(env, ctx, ISA_MIPS32);
3424 3433

  
3434
    if (use_icount)
3435
        gen_io_start();
3436

  
3425 3437
    switch (reg) {
3426 3438
    case 0:
3427 3439
        switch (sel) {
......
4004 4016
                rn, reg, sel);
4005 4017
    }
4006 4018
#endif
4019
    /* For simplicity, assume that all writes can cause interrupts.  */
4020
    if (use_icount) {
4021
        gen_io_end();
4022
        ctx->bstate = BS_STOP;
4023
    }
4007 4024
    return;
4008 4025

  
4009 4026
die:
......
4238 4255
    case 9:
4239 4256
        switch (sel) {
4240 4257
        case 0:
4258
            /* Mark as an IO operation because we read the time.  */
4259
            if (use_icount)
4260
                gen_io_start();
4241 4261
            tcg_gen_helper_1_0(do_mfc0_count, t0);
4262
            if (use_icount) {
4263
                gen_io_end();
4264
                ctx->bstate = BS_STOP;
4265
            }
4242 4266
            rn = "Count";
4243 4267
            break;
4244 4268
        /* 6,7 are implementation dependent */
......
4591 4615
    if (sel != 0)
4592 4616
        check_insn(env, ctx, ISA_MIPS64);
4593 4617

  
4618
    if (use_icount)
4619
        gen_io_start();
4620

  
4594 4621
    switch (reg) {
4595 4622
    case 0:
4596 4623
        switch (sel) {
......
5161 5188
    }
5162 5189
#endif
5163 5190
    tcg_temp_free(t0);
5191
    /* For simplicity, assume that all writes can cause interrupts.  */
5192
    if (use_icount) {
5193
        gen_io_end();
5194
        ctx->bstate = BS_STOP;
5195
    }
5164 5196
    return;
5165 5197

  
5166 5198
die:
......
7760 7792
        ctx->hflags &= ~MIPS_HFLAG_BMASK;
7761 7793
        ctx->bstate = BS_BRANCH;
7762 7794
        save_cpu_state(ctx, 0);
7795
        /* FIXME: Need to clear can_do_io.  */
7763 7796
        switch (hflags) {
7764 7797
        case MIPS_HFLAG_B:
7765 7798
            /* unconditional branch */
......
7807 7840
    target_ulong pc_start;
7808 7841
    uint16_t *gen_opc_end;
7809 7842
    int j, lj = -1;
7843
    int num_insns;
7844
    int max_insns;
7810 7845

  
7811 7846
    if (search_pc && loglevel)
7812 7847
        fprintf (logfile, "search pc %d\n", search_pc);
......
7826 7861
#else
7827 7862
    ctx.mem_idx = ctx.hflags & MIPS_HFLAG_KSU;
7828 7863
#endif
7864
    num_insns = 0;
7866
    max_insns = tb->cflags & CF_COUNT_MASK;
7867
    if (max_insns == 0)
7868
        max_insns = CF_COUNT_MASK;
7829 7869
#ifdef DEBUG_DISAS
7830 7870
    if (loglevel & CPU_LOG_TB_CPU) {
7831 7871
        fprintf(logfile, "------------------------------------------------\n");
......
7838 7878
        fprintf(logfile, "\ntb %p idx %d hflags %04x\n",
7839 7879
                tb, ctx.mem_idx, ctx.hflags);
7840 7880
#endif
7881
    gen_icount_start();
7841 7882
    while (ctx.bstate == BS_NONE) {
7842 7883
        if (env->nb_breakpoints > 0) {
7843 7884
            for(j = 0; j < env->nb_breakpoints; j++) {
......
7863 7904
            gen_opc_pc[lj] = ctx.pc;
7864 7905
            gen_opc_hflags[lj] = ctx.hflags & MIPS_HFLAG_BMASK;
7865 7906
            gen_opc_instr_start[lj] = 1;
7907
            gen_opc_icount[lj] = num_insns;
7866 7908
        }
7909
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7910
            gen_io_start();
7867 7911
        ctx.opcode = ldl_code(ctx.pc);
7868 7912
        decode_opc(env, &ctx);
7869 7913
        ctx.pc += 4;
7914
        num_insns++;
7870 7915

  
7871 7916
        if (env->singlestep_enabled)
7872 7917
            break;
......
7880 7925
        if (gen_opc_ptr >= gen_opc_end)
7881 7926
            break;
... This diff was truncated because it exceeds the maximum size that can be displayed.
