Revision bf20dc07

b/cpu-exec.c
@@ -620,7 +620,7 @@
                     next_tb = tcg_qemu_tb_exec(tc_ptr);
                     env->current_tb = NULL;
                     if ((next_tb & 3) == 2) {
-                        /* Instruction counter exired.  */
+                        /* Instruction counter expired.  */
                         int insns_left;
                         tb = (TranslationBlock *)(long)(next_tb & ~3);
                         /* Restore PC.  */
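
The (next_tb & 3) == 2 test in this hunk relies on the returned TB pointer carrying status flags in its two low bits (TB structures are word aligned, so those bits are otherwise zero), with 2 meaning the instruction counter expired. A minimal, self-contained sketch of that tagging trick; the struct and names here are stand-ins, not the real TranslationBlock:

#include <stdint.h>
#include <stdio.h>

struct fake_tb { int dummy; };   /* stand-in for TranslationBlock */

int main(void)
{
    static struct fake_tb some_tb;
    /* Pack the status code 2 ("instruction counter expired") into the
       low bits of an aligned pointer, mirroring the value returned by
       tcg_qemu_tb_exec() above. */
    uintptr_t next_tb = (uintptr_t)&some_tb | 2;

    if ((next_tb & 3) == 2) {
        /* Strip the tag bits to recover the TB pointer. */
        struct fake_tb *tb = (struct fake_tb *)(next_tb & ~(uintptr_t)3);
        printf("icount expired while executing TB at %p\n", (void *)tb);
    }
    return 0;
}
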
b/exec-all.h
@@ -372,7 +372,7 @@
     return addr + env1->tlb_table[mmu_idx][page_index].addend - (unsigned long)phys_ram_base;
 }
 
-/* Deterministic execution requires that IO only be performaed on the last
+/* Deterministic execution requires that IO only be performed on the last
    instruction of a TB so that interrupts take effect immediately.  */
 static inline int can_do_io(CPUState *env)
 {
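
The hunk only shows the declaration of can_do_io(); per the comment, it tells device emulation whether an I/O access can be performed right now without breaking determinism. The sketch below shows one shape such a guard can take: use_icount and env->current_tb appear elsewhere in this revision, while the can_do_io flag in CPUState (set by the translator on a TB's final instruction) and the stand-in types are assumptions made for illustration:

#include <stdio.h>

/* Minimal stand-ins so the sketch compiles on its own; the real fields
   live in QEMU's CPUState. */
typedef struct {
    void *current_tb;   /* TB currently executing, NULL otherwise       */
    int   can_do_io;    /* assumed flag: set on the last insn of a TB   */
} CPUState;

static int use_icount = 1;   /* forced on for the demo */

static inline int can_do_io(CPUState *env)
{
    if (!use_icount)
        return 1;           /* no instruction counting: always safe      */
    if (!env->current_tb)
        return 1;           /* not in translated code: interrupts land
                               immediately anyway                        */
    return env->can_do_io;  /* otherwise only on the TB's last insn      */
}

int main(void)
{
    CPUState cpu = { .current_tb = (void *)1, .can_do_io = 0 };
    printf("mid-TB access allowed: %d\n", can_do_io(&cpu));
    cpu.can_do_io = 1;
    printf("last-insn access allowed: %d\n", can_do_io(&cpu));
    return 0;
}
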
b/exec.c
@@ -109,7 +109,7 @@
    cpu_exec() */
 CPUState *cpu_single_env;
 /* 0 = Do not count executed instructions.
-   1 = Precice instruction counting.
+   1 = Precise instruction counting.
    2 = Adaptive rate instruction counting.  */
 int use_icount = 0;
 /* Current instruction counter.  While executing translated code this may
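
The three modes above are chosen at startup, typically from the -icount option (a fixed shift value or "auto"). A simplified sketch of that mapping follows; configure_icount is a made-up helper name and the parsing is illustrative rather than the actual vl.c option handling:

#include <stdlib.h>
#include <string.h>

int use_icount = 0;            /* as declared above                      */
static int icount_time_shift;  /* as declared in the vl.c hunk below     */

static void configure_icount(const char *arg)
{
    if (arg == NULL) {
        use_icount = 0;                  /* 0: counting disabled             */
    } else if (strcmp(arg, "auto") == 0) {
        icount_time_shift = 3;           /* starting guess, tuned at runtime */
        use_icount = 2;                  /* 2: adaptive rate counting        */
    } else {
        icount_time_shift = strtol(arg, NULL, 0);
        use_icount = 1;                  /* 1: precise counting, fixed rate  */
    }
}
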
@@ -1080,7 +1080,7 @@
 
 void tb_free(TranslationBlock *tb)
 {
-    /* In practice this is mostly used for single use temorary TB
+    /* In practice this is mostly used for single use temporary TB
        Ignore the hard cases and just back up if this TB happens to
        be the last one generated.  */
     if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
@@ -1394,7 +1394,7 @@
 
     old_mask = env->interrupt_request;
     /* FIXME: This is probably not threadsafe.  A different thread could
-       be in the mittle of a read-modify-write operation.  */
+       be in the middle of a read-modify-write operation.  */
     env->interrupt_request |= mask;
 #if defined(USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
@@ -3019,13 +3019,13 @@
     n = env->icount_decr.u16.low + tb->icount;
     cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
     /* Calculate how many instructions had been executed before the fault
-       occured.  */
+       occurred.  */
     n = n - env->icount_decr.u16.low;
     /* Generate a new TB ending on the I/O insn.  */
     n++;
     /* On MIPS and SH, delay slot instructions can only be restarted if
        they were already the first instruction in the TB.  If this is not
-       the first instruction in a TB then re-execute the preceeding
+       the first instruction in a TB then re-execute the preceding
        branch.  */
 #if defined(TARGET_MIPS)
     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
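
The arithmetic in this hunk is easiest to follow with concrete numbers. Assuming the TB prologue charges the whole tb->icount against the decrementer before the first instruction runs, and that cpu_restore_state() rewinds the low half of the counter to the faulting instruction, the sequence works out as below (illustrative values only, not taken from a real run):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t low_before_tb = 100;                        /* decrementer before the TB  */
    int      tb_icount     = 5;                          /* instructions in this TB    */
    uint16_t low_running   = low_before_tb - tb_icount;  /* while the TB runs: 95      */
    uint16_t low_restored  = low_before_tb - 2;          /* assumed rewind to the I/O
                                                            insn: 2 insns completed    */

    int n = low_running + tb_icount;                     /* 95 + 5 = 100               */
    n -= low_restored;                                   /* 100 - 98 = 2 executed insns */
    n++;                                                 /* include the I/O insn: 3     */

    printf("retranslate a TB of at most %d insns ending on the I/O insn\n", n);
    return 0;
}
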
@@ -3053,7 +3053,7 @@
     /* FIXME: In theory this could raise an exception.  In practice
        we have already translated the block once so it's probably ok.  */
     tb_gen_code(env, pc, cs_base, flags, cflags);
-    /* TODO: If env->pc != tb->pc (i.e. the failuting instruction was not
+    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
        the first in the TB) then we end up generating a whole new TB and
        repeating the fault, which is horribly inefficient.
        Better would be to execute just this insn uncached, or generate a
b/gen-icount.h
@@ -1,4 +1,4 @@
-/* Helpewrs for instruction counting code genration.  */
+/* Helpers for instruction counting code generation.  */
 
 static TCGArg *icount_arg;
 static int icount_label;
b/target-arm/translate.c
@@ -8684,7 +8684,7 @@
         /* Translation stops when a conditional branch is enoutered.
          * Otherwise the subsequent code could get translated several times.
          * Also stop translation when a page boundary is reached.  This
-         * ensures prefech aborts occur at the right place.  */
+         * ensures prefetch aborts occur at the right place.  */
         num_insns ++;
     } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
              !env->singlestep_enabled &&
b/target-cris/translate.c
@@ -3141,7 +3141,7 @@
 
                 num_insns++;
 		/* Check for delayed branches here. If we do it before
-		   actually genereating any host code, the simulator will just
+		   actually generating any host code, the simulator will just
 		   loop doing nothing for on this program location.  */
 		if (dc->delayed_branch) {
 			dc->delayed_branch--;
b/target-m68k/translate.c
@@ -2980,7 +2980,7 @@
         num_insns++;
 
         /* Terminate the TB on memory ops if watchpoints are present.  */
-        /* FIXME: This should be replacd by the deterministic execution
+        /* FIXME: This should be replaced by the deterministic execution
          * IRQ raising bits.  */
         if (dc->is_mem && env->nb_watchpoints)
             break;
b/target-mips/translate.c
@@ -3998,7 +3998,7 @@
                 rn, reg, sel);
     }
 #endif
-    /* For simplicitly assume that all writes can cause interrupts.  */
+    /* For simplicity assume that all writes can cause interrupts.  */
     if (use_icount) {
         gen_io_end();
         ctx->bstate = BS_STOP;
@@ -5170,7 +5170,7 @@
     }
 #endif
     tcg_temp_free(t0);
-    /* For simplicitly assume that all writes can cause interrupts.  */
+    /* For simplicity assume that all writes can cause interrupts.  */
     if (use_icount) {
         gen_io_end();
         ctx->bstate = BS_STOP;
b/vl.c
@@ -239,9 +239,9 @@
 static CPUState *cur_cpu;
 static CPUState *next_cpu;
 static int event_pending = 1;
-/* Conversion factor from emulated instrctions to virtual clock ticks.  */
+/* Conversion factor from emulated instructions to virtual clock ticks.  */
 static int icount_time_shift;
-/* Arbitrarily pick 1MIPS as the minimum alowable speed.  */
+/* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
 #define MAX_ICOUNT_SHIFT 10
 /* Compensate for varying guest execution speed.  */
 static int64_t qemu_icount_bias;
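
icount_time_shift is the log2 of how many virtual clock ticks each emulated instruction is worth, and qemu_icount_bias absorbs drift accumulated while the guest is idle. A sketch of the conversion those comments imply; icount_to_vm_ticks is a made-up helper name, and the real computation presumably lives in cpu_get_icount(), which this hunk does not show:

#include <stdint.h>
#include <stdio.h>

static int icount_time_shift = 10;   /* MAX_ICOUNT_SHIFT: the ~1 MIPS floor */
static int64_t qemu_icount_bias;     /* idle-time correction, 0 here        */

static int64_t icount_to_vm_ticks(int64_t insns)
{
    return qemu_icount_bias + (insns << icount_time_shift);
}

int main(void)
{
    /* With a shift of 10 each instruction is worth 2^10 ticks; assuming the
       usual 1 GHz virtual clock that is about 1 us per instruction, i.e.
       roughly the 1 MIPS minimum speed mentioned above. */
    printf("1000000 insns -> %lld ticks\n",
           (long long)icount_to_vm_ticks(1000000));
    return 0;
}
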
@@ -903,7 +903,7 @@
 #endif /* _WIN32 */
 
 /* Correlation between real and virtual time is always going to be
-   farly approximate, so ignore small variation.
+   fairly approximate, so ignore small variation.
    When the guest is idle real and virtual time will be aligned in
    the IO wait loop.  */
 #define ICOUNT_WOBBLE (QEMU_TIMER_BASE / 10)
@@ -7262,7 +7262,7 @@
                     if (use_icount == 1) {
                         /* When not using an adaptive execution frequency
                            we tend to get badly out of sync with real time,
-                           so just delay for a resonable amount of time.  */
+                           so just delay for a reasonable amount of time.  */
                         delta = 0;
                     } else {
                         delta = cpu_get_icount() - cpu_get_clock();
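
In the adaptive case (use_icount == 2) the delta between virtual and real time computed above is what drives retuning of icount_time_shift. The code that performs the adjustment falls outside this hunk; the sketch below only illustrates the direction of the feedback, treating ICOUNT_WOBBLE as a simple dead-band threshold (that simplification, and the 1 GHz QEMU_TIMER_BASE value, are assumptions, and the real adjustment is more involved):

#include <stdint.h>
#include <stdio.h>

#define QEMU_TIMER_BASE   1000000000LL            /* assumed 1 GHz vm clock   */
#define ICOUNT_WOBBLE     (QEMU_TIMER_BASE / 10)  /* from the hunk above      */
#define MAX_ICOUNT_SHIFT  10                      /* from the vl.c hunk above */

static int icount_time_shift = 3;

static void icount_adjust_sketch(int64_t delta)
{
    if (delta > ICOUNT_WOBBLE && icount_time_shift > 0) {
        /* Virtual time ahead of real time: charge fewer ticks per insn. */
        icount_time_shift--;
    } else if (delta < -ICOUNT_WOBBLE && icount_time_shift < MAX_ICOUNT_SHIFT) {
        /* Virtual time lagging real time: charge more ticks per insn. */
        icount_time_shift++;
    }
}

int main(void)
{
    icount_adjust_sketch(2 * ICOUNT_WOBBLE);   /* guest running ahead */
    printf("icount_time_shift is now %d\n", icount_time_shift);
    return 0;
}
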
