Revision 5fafdf24: kqemu.c

Whitespace-only cleanup: every changed line below differs from its
replacement only in trailing blanks, so the removed (-) and added (+)
lines look identical once the invisible trailing whitespace is gone.

--- a/kqemu.c
+++ b/kqemu.c
@@ -1,6 +1,6 @@
 /*
  * KQEMU support
- * 
+ *
  * Copyright (c) 2005 Fabrice Bellard
  *
  * This library is free software; you can redistribute it and/or
@@ -129,9 +129,9 @@
        target cpus because they are important for user code. Strictly
        speaking, only SSE really matters because the OS must support
        it if the user code uses it. */
-    critical_features_mask = 
-        CPUID_CMOV | CPUID_CX8 | 
-        CPUID_FXSR | CPUID_MMX | CPUID_SSE | 
+    critical_features_mask =
+        CPUID_CMOV | CPUID_CX8 |
+        CPUID_FXSR | CPUID_MMX | CPUID_SSE |
         CPUID_SSE2 | CPUID_SEP;
     ext_features_mask = CPUID_EXT_SSE3 | CPUID_EXT_MONITOR;
     if (!is_cpuid_supported()) {
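Reviewer note on the hunk above: critical_features_mask collects CPUID
leaf-1 EDX bits (CMOV, CX8, FXSR, MMX, SSE, SSE2, SEP) that must also be
present on the host, because guest user code runs natively under kqemu
and would fault on a missing instruction. A minimal sketch of such a
host-side check, assuming GCC/clang's <cpuid.h> helper; the function
name check_host_features() is hypothetical, not from this file:

    #include <cpuid.h>     /* __get_cpuid() wrapper for the CPUID insn */
    #include <stdint.h>

    static int check_host_features(uint32_t critical_features_mask)
    {
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 1: EDX holds the classic feature flags; ECX would hold
           the SSE3/MONITOR bits tested via ext_features_mask. */
        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;                /* CPUID leaf 1 not available */

        /* Every critical target feature must exist on the host too. */
        return (edx & critical_features_mask) == critical_features_mask;
    }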
@@ -194,17 +194,17 @@
         goto fail;
     }
 
-    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH * 
+    pages_to_flush = qemu_vmalloc(KQEMU_MAX_PAGES_TO_FLUSH *
                                   sizeof(unsigned long));
     if (!pages_to_flush)
         goto fail;
 
-    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE * 
+    ram_pages_to_update = qemu_vmalloc(KQEMU_MAX_RAM_PAGES_TO_UPDATE *
                                        sizeof(unsigned long));
     if (!ram_pages_to_update)
         goto fail;
 
-    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES * 
+    modified_ram_pages = qemu_vmalloc(KQEMU_MAX_MODIFIED_RAM_PAGES *
                                       sizeof(unsigned long));
     if (!modified_ram_pages)
         goto fail;
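The three buffers allocated above are shared with the kqemu kernel
module, so qemu_vmalloc() must return page-aligned memory. A rough POSIX
stand-in, assuming mmap() is an acceptable substitute for the purpose
(QEMU's real helper is platform-specific):

    #include <stddef.h>
    #include <sys/mman.h>

    /* Hypothetical replacement: anonymous mmap is always page-aligned. */
    static void *vmalloc_sketch(size_t size)
    {
        void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        return p == MAP_FAILED ? NULL : p;   /* NULL on failure */
    }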
@@ -286,7 +286,7 @@
 {
     int i;
     unsigned long page_index;
-    
+
     for(i = 0; i < nb_modified_ram_pages; i++) {
         page_index = modified_ram_pages[i] >> TARGET_PAGE_BITS;
         modified_ram_pages_table[page_index] = 0;
@@ -312,12 +312,12 @@
     if (nb_modified_ram_pages >= KQEMU_MAX_MODIFIED_RAM_PAGES) {
         /* flush */
 #ifdef _WIN32
-        ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES, 
-                              &nb_modified_ram_pages, 
+        ret = DeviceIoControl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
+                              &nb_modified_ram_pages,
                               sizeof(nb_modified_ram_pages),
                               NULL, 0, &temp, NULL);
 #else
-        ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES, 
+        ret = ioctl(kqemu_fd, KQEMU_MODIFY_RAM_PAGES,
                     &nb_modified_ram_pages);
 #endif
         kqemu_reset_modified_ram_pages();
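This hunk shows the batching scheme for dirty RAM pages: addresses pile
up in modified_ram_pages[] and one KQEMU_MODIFY_RAM_PAGES call flushes
the whole batch once the array is full. A self-contained restatement of
the pattern; flush_batch() is a hypothetical stand-in for the real
ioctl()/DeviceIoControl() call:

    #define MAX_BATCH 512                 /* illustrative capacity */

    static unsigned long batch[MAX_BATCH];
    static int batch_len;

    static void flush_batch(void)         /* real code: one ioctl */
    {
        batch_len = 0;                    /* batch handed off, reset */
    }

    static void note_modified_page(unsigned long ram_addr)
    {
        batch[batch_len++] = ram_addr;
        if (batch_len >= MAX_BATCH)
            flush_batch();
    }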
@@ -364,7 +364,7 @@
 {
     int fptag, i, j;
     struct fpstate fp1, *fp = &fp1;
-    
+
     fp->fpuc = env->fpuc;
     fp->fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
     fptag = 0;
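The fp->fpus line in this hunk packs the x87 stack-top index (TOP) into
bits 13..11 of the status word, which is why it clears the field with
~0x3800 and shifts env->fpstt left by 11. A self-checking example of the
same bit manipulation:

    #include <assert.h>

    int main(void)
    {
        unsigned int fpus = 0xffff, fpstt = 5;
        unsigned int fsw = (fpus & ~0x3800) | (fpstt & 0x7) << 11;
        assert(((fsw >> 11) & 0x7) == 5);   /* TOP reads back as 5 */
        return 0;
    }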
@@ -384,7 +384,7 @@
     }
     asm volatile ("frstor %0" : "=m" (*fp));
 }
-    
+
 static void save_native_fp_fsave(CPUState *env)
 {
     int fptag, i, j;
@@ -470,7 +470,7 @@
                       struct kqemu_cpu_state *kenv)
 {
     int selector;
-    
+
     selector = (env->star >> 32) & 0xffff;
 #ifdef __x86_64__
     if (env->hflags & HF_LMA_MASK) {
@@ -482,12 +482,12 @@
         code64 = env->hflags & HF_CS64_MASK;
 
         cpu_x86_set_cpl(env, 0);
-        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 
-                               0, 0xffffffff, 
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                               0, 0xffffffff,
                                DESC_G_MASK | DESC_P_MASK |
                                DESC_S_MASK |
                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
-        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK |
@@ -497,18 +497,18 @@
             env->eip = env->lstar;
         else
             env->eip = env->cstar;
-    } else 
+    } else
 #endif
     {
         env->regs[R_ECX] = (uint32_t)kenv->next_eip;
-        
+
         cpu_x86_set_cpl(env, 0);
-        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc, 
-                               0, 0xffffffff, 
+        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+                               0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK |
                                DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
-        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc, 
+        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                DESC_S_MASK |
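Context for the do_syscall() hunks above: by the x86 SYSCALL convention,
the kernel CS selector is taken from bits 47..32 of the IA32_STAR MSR
and SS uses the next GDT descriptor, which is exactly what
"selector = (env->star >> 32) & 0xffff", "selector & 0xfffc" and
"(selector + 8) & 0xfffc" implement. A compact restatement:

    #include <stdint.h>

    static void star_selectors(uint64_t star, uint16_t *cs, uint16_t *ss)
    {
        uint16_t sel = (uint16_t)((star >> 32) & 0xffff);
        *cs = sel & 0xfffc;                     /* RPL forced to 0 */
        *ss = (uint16_t)((sel + 8) & 0xfffc);   /* SS follows CS */
    }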
@@ -605,7 +605,7 @@
         }
     }
     qsort(pr, nb_pc_records, sizeof(PCRecord *), pc_rec_cmp);
-    
+
     f = fopen("/tmp/kqemu.stats", "w");
     if (!f) {
         perror("/tmp/kqemu.stats");
@@ -616,9 +616,9 @@
     for(i = 0; i < nb_pc_records; i++) {
         r = pr[i];
         sum += r->count;
-        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n", 
-                r->pc, 
-                r->count, 
+        fprintf(f, "%08lx: %" PRId64 " %0.2f%% %0.2f%%\n",
+                r->pc,
+                r->count,
                 (double)r->count / (double)total * 100.0,
                 (double)sum / (double)total * 100.0);
     }
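The two percentage columns written to /tmp/kqemu.stats are this record's
share of all executions and the running cumulative share; since the
records were just qsort()ed by count, the second column shows how fast
the hottest PCs come to dominate. A toy reproduction with invented
numbers:

    #include <stdio.h>

    int main(void)
    {
        long long total = 20000, counts[3] = { 12000, 5000, 3000 }, sum = 0;
        for (int i = 0; i < 3; i++) {
            sum += counts[i];
            printf("%lld %.2f%% %.2f%%\n", counts[i],
                   (double)counts[i] / (double)total * 100.0,
                   (double)sum / (double)total * 100.0);
        }
        return 0;   /* 60.00/60.00, 25.00/85.00, 15.00/100.00 */
    }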
@@ -697,7 +697,7 @@
     kenv->nb_ram_pages_to_update = nb_ram_pages_to_update;
 #endif
     nb_ram_pages_to_update = 0;
-    
+
 #if KQEMU_VERSION >= 0x010300
     kenv->nb_modified_ram_pages = nb_modified_ram_pages;
 #endif
@@ -789,7 +789,7 @@
 {
     unsigned int new_hflags;
 #ifdef TARGET_X86_64
-    if ((env->hflags & HF_LMA_MASK) && 
+    if ((env->hflags & HF_LMA_MASK) &&
         (env->segs[R_CS].flags & DESC_L_MASK)) {
         /* long mode */
         new_hflags = HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
@@ -801,7 +801,7 @@
             >> (DESC_B_SHIFT - HF_CS32_SHIFT);
         new_hflags |= (env->segs[R_SS].flags & DESC_B_MASK)
             >> (DESC_B_SHIFT - HF_SS32_SHIFT);
-        if (!(env->cr[0] & CR0_PE_MASK) || 
+        if (!(env->cr[0] & CR0_PE_MASK) ||
             (env->eflags & VM_MASK) ||
             !(env->hflags & HF_CS32_MASK)) {
             /* XXX: try to avoid this test. The problem comes from the
@@ -811,13 +811,13 @@
                translate-i386.c. */
             new_hflags |= HF_ADDSEG_MASK;
         } else {
-            new_hflags |= ((env->segs[R_DS].base | 
+            new_hflags |= ((env->segs[R_DS].base |
                             env->segs[R_ES].base |
-                            env->segs[R_SS].base) != 0) << 
+                            env->segs[R_SS].base) != 0) <<
                 HF_ADDSEG_SHIFT;
         }
     }
-    env->hflags = (env->hflags & 
+    env->hflags = (env->hflags &
         ~(HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)) |
         new_hflags;
 }
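The "((base | base | base) != 0) << HF_ADDSEG_SHIFT" expression in this
hunk collapses "any of DS/ES/SS has a nonzero base" into a single flag
bit, so the translator knows whether segment bases must be added when
forming addresses. An illustration of the trick; the shift value below
is arbitrary for the example, not QEMU's actual HF_ADDSEG_SHIFT:

    #include <assert.h>

    int main(void)
    {
        unsigned long ds_base = 0, es_base = 0, ss_base = 0x1000;
        const unsigned int SHIFT = 6;          /* illustrative only */

        /* != 0 yields exactly 0 or 1, then lands in the flag position */
        unsigned int flag =
            (unsigned int)((ds_base | es_base | ss_base) != 0) << SHIFT;
        assert(flag == 1u << SHIFT);
        return 0;
    }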
@@ -828,7 +828,7 @@
         env->hflags |= HF_OSFXSR_MASK;
     else
         env->hflags &= ~HF_OSFXSR_MASK;
-    
+
 #ifdef DEBUG
     if (loglevel & CPU_LOG_INT) {
         fprintf(logfile, "kqemu: kqemu_cpu_exec: ret=0x%x\n", ret);
@@ -837,7 +837,7 @@
     if (ret == KQEMU_RET_SYSCALL) {
         /* syscall instruction */
         return do_syscall(env, kenv);
-    } else 
+    } else
     if ((ret & 0xff00) == KQEMU_RET_INT) {
         env->exception_index = ret & 0xff;
         env->error_code = 0;
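The "(ret & 0xff00) == KQEMU_RET_INT" test above suggests KQEMU packs a
result class in the high byte and the interrupt vector in the low byte.
A decoding sketch under that reading; the constant below is a
hypothetical stand-in, not a value taken from kqemu.h:

    #define KQEMU_RET_INT_SKETCH 0x0100   /* assumed class encoding */

    static int decode_int_vector(int ret)
    {
        if ((ret & 0xff00) == KQEMU_RET_INT_SKETCH)
            return ret & 0xff;            /* low byte = vector number */
        return -1;                        /* some other return class */
    }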
@@ -848,7 +848,7 @@
 #endif
 #ifdef DEBUG
     if (loglevel & CPU_LOG_INT) {
-        fprintf(logfile, "kqemu: interrupt v=%02x:\n", 
+        fprintf(logfile, "kqemu: interrupt v=%02x:\n",
                 env->exception_index);
         cpu_dump_state(env, logfile, fprintf, 0);
     }
@@ -880,7 +880,7 @@
         }
 #endif
         return 0;
-    } else if (ret == KQEMU_RET_SOFTMMU) { 
+    } else if (ret == KQEMU_RET_SOFTMMU) {
 #ifdef CONFIG_PROFILER
     {
         unsigned long pc = env->eip + env->segs[R_CS].base;
@@ -904,7 +904,7 @@
 void kqemu_cpu_interrupt(CPUState *env)
 {
 #if defined(_WIN32) && KQEMU_VERSION >= 0x010101
-    /* cancelling the I/O request causes KQEMU to finish executing the 
+    /* cancelling the I/O request causes KQEMU to finish executing the
        current block and successfully returning. */
     CancelIo(kqemu_fd);
 #endif