Revision 2e70f6ef exec.c

--- a/exec.c
+++ b/exec.c
@@ -107,6 +107,13 @@
 /* current CPU in the current thread. It is only valid inside
    cpu_exec() */
 CPUState *cpu_single_env;
+/* 0 = Do not count executed instructions.
+   1 = Precise instruction counting.
+   2 = Adaptive rate instruction counting.  */
+int use_icount = 0;
+/* Current instruction counter.  While executing translated code this may
+   include some instructions that have not yet been executed.  */
+int64_t qemu_icount;
 
 typedef struct PageDesc {
     /* list of TBs intersecting this ram page */
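
The two new globals are the heart of the icount feature this revision introduces: with use_icount enabled, qemu_icount advances once per translated guest instruction and can serve as a deterministic time base. As a rough, self-contained sketch (the shift constant and function name are illustrative assumptions, not code from this revision), virtual time can be derived by charging a fixed power-of-two number of nanoseconds per instruction:

    #include <stdint.h>

    /* Illustrative only: one guest instruction costs 2^shift ns, so the
       resulting clock depends on instructions executed, not host speed. */
    static int icount_time_shift = 3;

    static int64_t sketch_icount_to_ns(int64_t icount)
    {
        return icount << icount_time_shift;
    }
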
@@ -633,7 +640,7 @@
     tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
 }
 
-static inline void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
+void tb_phys_invalidate(TranslationBlock *tb, target_ulong page_addr)
 {
     CPUState *env;
     PageDesc *p;
@@ -746,11 +753,9 @@
     }
 }
 
-#ifdef TARGET_HAS_PRECISE_SMC
-
-static void tb_gen_code(CPUState *env,
-                        target_ulong pc, target_ulong cs_base, int flags,
-                        int cflags)
+TranslationBlock *tb_gen_code(CPUState *env,
+                              target_ulong pc, target_ulong cs_base,
+                              int flags, int cflags)
 {
     TranslationBlock *tb;
     uint8_t *tc_ptr;
@@ -764,6 +769,8 @@
         tb_flush(env);
         /* cannot fail at this point */
         tb = tb_alloc(pc);
+        /* Don't forget to invalidate previous TB info.  */
+        tb_invalidated_flag = 1;
     }
     tc_ptr = code_gen_ptr;
     tb->tc_ptr = tc_ptr;
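
For context on the two added lines: tb_alloc() can fail when the code generation buffer is full, in which case tb_flush() recycles every translated block, so any TB pointer cached elsewhere becomes stale, and tb_invalidated_flag warns the execution loop not to chain through it. A minimal self-contained sketch of that flush-and-flag pattern (all names invented for illustration):

    #include <stddef.h>

    struct block { const void *code; };

    static struct block pool[16];
    static size_t pool_used;
    static int pool_flushed_flag;   /* analogous to tb_invalidated_flag */

    static struct block *alloc_block(void)
    {
        if (pool_used == sizeof(pool) / sizeof(pool[0])) {
            pool_used = 0;          /* flush: recycle the whole pool */
            pool_flushed_flag = 1;  /* cached block pointers are stale */
        }
        return &pool[pool_used++];
    }
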
@@ -780,8 +787,8 @@
         phys_page2 = get_phys_addr_code(env, virt_page2);
     }
     tb_link_phys(tb, phys_pc, phys_page2);
+    return tb;
 }
-#endif
 
 /* invalidate all TBs which intersect with the target physical page
    starting in range [start;end[. NOTE: start and end must refer to
@@ -836,13 +843,13 @@
             if (current_tb_not_found) {
                 current_tb_not_found = 0;
                 current_tb = NULL;
-                if (env->mem_write_pc) {
+                if (env->mem_io_pc) {
                     /* now we have a real cpu fault */
-                    current_tb = tb_find_pc(env->mem_write_pc);
+                    current_tb = tb_find_pc(env->mem_io_pc);
                 }
             }
             if (current_tb == tb &&
-                !(current_tb->cflags & CF_SINGLE_INSN)) {
+                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                 /* If we are modifying the current TB, we must stop
                 its execution. We could be more precise by checking
                 that the modification is after the current PC, but it
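
The CF_SINGLE_INSN test disappears because cflags now carries an instruction count in its low bits, so "this TB contains exactly one instruction" is expressed as a count of 1. A sketch of the encoding (the mask values are assumptions for illustration; the real definitions live in the headers of this revision):

    #include <stdint.h>

    #define SKETCH_CF_COUNT_MASK 0x7fff   /* low bits: insns in the TB */
    #define SKETCH_CF_LAST_IO    0x8000   /* last insn may do device I/O */

    static int is_single_insn_tb(uint32_t cflags)
    {
        return (cflags & SKETCH_CF_COUNT_MASK) == 1;
    }
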
@@ -851,7 +858,7 @@
 
                 current_tb_modified = 1;
                 cpu_restore_state(current_tb, env,
-                                  env->mem_write_pc, NULL);
+                                  env->mem_io_pc, NULL);
 #if defined(TARGET_I386)
                 current_flags = env->hflags;
                 current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
@@ -883,7 +890,7 @@
     if (!p->first_tb) {
         invalidate_page_bitmap(p);
         if (is_cpu_write_access) {
-            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
+            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
         }
     }
 #endif
@@ -893,8 +900,7 @@
            modifying the memory. It will ensure that it cannot modify
            itself */
         env->current_tb = NULL;
-        tb_gen_code(env, current_pc, current_cs_base, current_flags,
-                    CF_SINGLE_INSN);
+        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
         cpu_resume_from_signal(env, NULL);
     }
 #endif
@@ -909,7 +915,7 @@
     if (1) {
         if (loglevel) {
             fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
-                   cpu_single_env->mem_write_vaddr, len,
+                   cpu_single_env->mem_io_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
         }
@@ -961,7 +967,7 @@
         tb = (TranslationBlock *)((long)tb & ~3);
 #ifdef TARGET_HAS_PRECISE_SMC
         if (current_tb == tb &&
-            !(current_tb->cflags & CF_SINGLE_INSN)) {
+            (current_tb->cflags & CF_COUNT_MASK) != 1) {
                 /* If we are modifying the current TB, we must stop
                    its execution. We could be more precise by checking
                    that the modification is after the current PC, but it
@@ -990,8 +996,7 @@
            modifying the memory. It will ensure that it cannot modify
            itself */
         env->current_tb = NULL;
-        tb_gen_code(env, current_pc, current_cs_base, current_flags,
-                    CF_SINGLE_INSN);
+        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
         cpu_resume_from_signal(env, puc);
     }
 #endif
@@ -1068,6 +1073,17 @@
     return tb;
 }
 
+void tb_free(TranslationBlock *tb)
+{
+    /* In practice this is mostly used for single-use temporary TBs.
+       Ignore the hard cases and just back up if this TB happens to
+       be the last one generated.  */
+    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
+        code_gen_ptr = tb->tc_ptr;
+        nb_tbs--;
+    }
+}
+
 /* add a new TB and link it to the physical page tables. phys_page2 is
    (-1) to indicate that only one page contains the TB. */
 void tb_link_phys(TranslationBlock *tb,
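
tb_free() works because TBs are carved out of a linear code generation buffer: only the most recent allocation can be cheaply undone, by rewinding the allocation pointer, and anything older is left in place until the next full flush. A self-contained sketch of that bump-allocator pattern (names invented):

    #include <stddef.h>

    static char arena[4096];
    static size_t arena_top;

    static void *arena_alloc(size_t size)
    {
        void *p = &arena[arena_top];
        arena_top += size;
        return p;
    }

    static void arena_free_last(void *p, size_t size)
    {
        /* Only the last allocation can be reclaimed cheaply. */
        if ((char *)p + size == &arena[arena_top])
            arena_top -= size;
        /* Otherwise ignore the hard case; a later flush reclaims it. */
    }
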
@@ -1369,7 +1385,9 @@
     TranslationBlock *tb;
     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
 #endif
+    int old_mask;
 
+    old_mask = env->interrupt_request;
     /* FIXME: This is probably not threadsafe.  A different thread could
        be in the middle of a read-modify-write operation.  */
     env->interrupt_request |= mask;
@@ -1379,13 +1397,25 @@
        emulation this often isn't actually as bad as it sounds.  Often
        signals are used primarily to interrupt blocking syscalls.  */
 #else
-    /* if the cpu is currently executing code, we must unlink it and
-       all the potentially executing TB */
-    tb = env->current_tb;
-    if (tb && !testandset(&interrupt_lock)) {
-        env->current_tb = NULL;
-        tb_reset_jump_recursive(tb);
-        resetlock(&interrupt_lock);
+    if (use_icount) {
+        env->icount_decr.u16.high = 0x8000;
+#ifndef CONFIG_USER_ONLY
+        /* CPU_INTERRUPT_EXIT isn't a real interrupt.  It just means
+           an async event happened and we need to process it.  */
+        if (!can_do_io(env)
+            && (mask & ~(old_mask | CPU_INTERRUPT_EXIT)) != 0) {
+            cpu_abort(env, "Raised interrupt while not in I/O function");
+        }
+#endif
+    } else {
+        tb = env->current_tb;
+        /* if the cpu is currently executing code, we must unlink it and
+           all the potentially executing TB */
+        if (tb && !testandset(&interrupt_lock)) {
+            env->current_tb = NULL;
+            tb_reset_jump_recursive(tb);
+            resetlock(&interrupt_lock);
+        }
     }
 #endif
 }
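
Writing 0x8000 into icount_decr.u16.high relies on the counter being a union: generated code tests the 32-bit value, and forcing the high half to 0x8000 drives it negative, so the running TB stops at its next check without any jump unlinking. A runnable sketch of the trick (field order shown for a little-endian host; QEMU orders the halves per host endianness):

    #include <stdint.h>
    #include <stdio.h>

    union icount_decr_sketch {
        uint32_t u32;
        struct {
            uint16_t low;    /* remaining instruction budget */
            uint16_t high;   /* set to 0x8000 to request an exit */
        } u16;
    };

    int main(void)
    {
        union icount_decr_sketch d = { .u32 = 100 };
        d.u16.high = 0x8000;
        printf("%d\n", (int32_t)d.u32);  /* negative => TB exits */
        return 0;
    }
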
@@ -2227,7 +2257,7 @@
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
 
 static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
@@ -2252,7 +2282,7 @@
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
 
 static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
@@ -2277,7 +2307,7 @@
     /* we remove the notdirty callback only if the code has been
        flushed */
     if (dirty_flags == 0xff)
-        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_write_vaddr);
+        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
 }
 
 static CPUReadMemoryFunc *error_mem_read[3] = {
@@ -2299,7 +2329,7 @@
     target_ulong vaddr;
     int i;
 
-    vaddr = (env->mem_write_vaddr & TARGET_PAGE_MASK) + offset;
+    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
     for (i = 0; i < env->nb_watchpoints; i++) {
         if (vaddr == env->watchpoint[i].vaddr
                 && (env->watchpoint[i].type & flags)) {
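
The watchpoint check reconstructs the full virtual address from two pieces: the page recorded in mem_io_vaddr and the in-page offset of the access. A tiny worked example (4 KiB pages and all values assumed):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mem_io_vaddr = 0x12345abc;  /* address of the access */
        uint64_t page_mask = ~(uint64_t)0xfff;
        uint64_t offset = 0x10;              /* offset within the page */
        printf("0x%llx\n",                   /* prints 0x12345010 */
               (unsigned long long)((mem_io_vaddr & page_mask) + offset));
        return 0;
    }
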
@@ -2967,6 +2997,65 @@
     return 0;
 }
 
+/* in deterministic execution mode, instructions doing device I/Os
+   must be at the end of the TB */
+void cpu_io_recompile(CPUState *env, void *retaddr)
+{
+    TranslationBlock *tb;
+    uint32_t n, cflags;
+    target_ulong pc, cs_base;
+    uint64_t flags;
+
+    tb = tb_find_pc((unsigned long)retaddr);
+    if (!tb) {
+        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
+                  retaddr);
+    }
+    n = env->icount_decr.u16.low + tb->icount;
+    cpu_restore_state(tb, env, (unsigned long)retaddr, NULL);
+    /* Calculate how many instructions had been executed before the fault
+       occurred.  */
+    n = n - env->icount_decr.u16.low;
+    /* Generate a new TB ending on the I/O insn.  */
+    n++;
+    /* On MIPS and SH, delay slot instructions can only be restarted if
+       they were already the first instruction in the TB.  If this is not
+       the first instruction in a TB then re-execute the preceding
+       branch.  */
+#if defined(TARGET_MIPS)
+    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
+        env->active_tc.PC -= 4;
+        env->icount_decr.u16.low++;
+        env->hflags &= ~MIPS_HFLAG_BMASK;
+    }
+#elif defined(TARGET_SH4)
+    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
+            && n > 1) {
+        env->pc -= 2;
+        env->icount_decr.u16.low++;
+        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
+    }
+#endif
+    /* This should never happen.  */
+    if (n > CF_COUNT_MASK)
+        cpu_abort(env, "TB too big during recompile");
+
+    cflags = n | CF_LAST_IO;
+    pc = tb->pc;
+    cs_base = tb->cs_base;
+    flags = tb->flags;
+    tb_phys_invalidate(tb, -1);
+    /* FIXME: In theory this could raise an exception.  In practice
+       we have already translated the block once so it's probably ok.  */
+    tb_gen_code(env, pc, cs_base, flags, cflags);
+    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
+       the first in the TB) then we end up generating a whole new TB and
+       repeating the fault, which is horribly inefficient.
+       Better would be to execute just this insn uncached, or generate a
+       second new TB.  */
+    cpu_resume_from_signal(env, NULL);
+}
+
 void dump_exec_info(FILE *f,
                     int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
 {
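
The counting at the top of cpu_io_recompile() can be followed with concrete numbers. In this revision's icount scheme the generated TB prologue debits the whole block's instruction count up front, so icount_decr.u16.low plus tb->icount recovers the budget at TB entry; cpu_restore_state() then credits back the unexecuted tail, and the difference is how many instructions completed before the I/O access. A worked sketch with invented values:

    #include <stdio.h>

    int main(void)
    {
        unsigned budget_at_entry = 12;     /* icount_decr.u16.low + tb->icount */
        unsigned budget_after_restore = 9; /* icount_decr.u16.low, restored */

        unsigned n = budget_at_entry - budget_after_restore;  /* 3 executed */
        n++;   /* the new TB must end on the I/O insn itself: 4 */
        printf("regenerate with cflags = %u | CF_LAST_IO\n", n);
        return 0;
    }
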
