Revision 61382a50 — diff of exec.c (repository revision view)
444 | 444 |
prot = 0; |
445 | 445 |
for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) |
446 | 446 |
prot |= page_get_flags(addr); |
447 |
#if !defined(CONFIG_SOFTMMU) |
|
447 | 448 |
mprotect((void *)host_start, host_page_size, |
448 | 449 |
(prot & PAGE_BITS) & ~PAGE_WRITE); |
450 |
#endif |
|
451 |
#if !defined(CONFIG_USER_ONLY) |
|
452 |
/* suppress soft TLB */ |
|
453 |
/* XXX: must flush on all processor with same address space */ |
|
454 |
tlb_flush_page_write(cpu_single_env, host_start); |
|
455 |
#endif |
|
449 | 456 |
#ifdef DEBUG_TB_INVALIDATE |
450 | 457 |
printf("protecting code page: 0x%08lx\n", |
451 | 458 |
host_start); |
452 | 459 |
#endif |
453 | 460 |
p->flags &= ~PAGE_WRITE; |
454 |
#ifdef DEBUG_TB_CHECK |
|
455 |
tb_page_check(); |
|
456 |
#endif |
|
457 | 461 |
} |
458 | 462 |
} |
459 | 463 |
|
... | ... | |
483 | 487 |
if (page_index2 != page_index1) { |
484 | 488 |
tb_alloc_page(tb, page_index2); |
485 | 489 |
} |
490 |
#ifdef DEBUG_TB_CHECK |
|
491 |
tb_page_check(); |
|
492 |
#endif |
|
486 | 493 |
tb->jmp_first = (TranslationBlock *)((long)tb | 2); |
487 | 494 |
tb->jmp_next[0] = NULL; |
488 | 495 |
tb->jmp_next[1] = NULL; |
... | ... | |
517 | 524 |
/* if the page was really writable, then we change its |
518 | 525 |
protection back to writable */ |
519 | 526 |
if (prot & PAGE_WRITE_ORG) { |
520 |
mprotect((void *)host_start, host_page_size, |
|
521 |
(prot & PAGE_BITS) | PAGE_WRITE); |
|
522 | 527 |
pindex = (address - host_start) >> TARGET_PAGE_BITS; |
523 |
p1[pindex].flags |= PAGE_WRITE; |
|
524 |
/* and since the content will be modified, we must invalidate |
|
525 |
the corresponding translated code. */ |
|
526 |
tb_invalidate_page(address); |
|
528 |
if (!(p1[pindex].flags & PAGE_WRITE)) { |
|
529 |
#if !defined(CONFIG_SOFTMMU) |
|
530 |
mprotect((void *)host_start, host_page_size, |
|
531 |
(prot & PAGE_BITS) | PAGE_WRITE); |
|
532 |
#endif |
|
533 |
p1[pindex].flags |= PAGE_WRITE; |
|
534 |
/* and since the content will be modified, we must invalidate |
|
535 |
the corresponding translated code. */ |
|
536 |
tb_invalidate_page(address); |
|
527 | 537 |
#ifdef DEBUG_TB_CHECK |
528 |
tb_invalidate_check(address); |
|
538 |
tb_invalidate_check(address);
|
|
529 | 539 |
#endif |
530 |
return 1; |
|
531 |
} else { |
|
532 |
return 0; |
|
540 |
return 1; |
|
541 |
} |
|
533 | 542 |
} |
543 |
return 0; |
|
534 | 544 |
} |
535 | 545 |
|
536 | 546 |
/* call this function when system calls directly modify a memory area */ |
... | ... | |
734 | 744 |
/* unmap all maped pages and flush all associated code */ |
735 | 745 |
void page_unmap(void) |
736 | 746 |
{ |
737 |
PageDesc *p, *pmap; |
|
738 |
unsigned long addr; |
|
739 |
int i, j, ret, j1; |
|
747 |
PageDesc *pmap; |
|
748 |
int i; |
|
740 | 749 |
|
741 | 750 |
for(i = 0; i < L1_SIZE; i++) { |
742 | 751 |
pmap = l1_map[i]; |
743 | 752 |
if (pmap) { |
753 |
#if !defined(CONFIG_SOFTMMU) |
|
754 |
PageDesc *p; |
|
755 |
unsigned long addr; |
|
756 |
int j, ret, j1; |
|
757 |
|
|
744 | 758 |
p = pmap; |
745 | 759 |
for(j = 0;j < L2_SIZE;) { |
746 | 760 |
if (p->flags & PAGE_VALID) { |
... | ... | |
763 | 777 |
j++; |
764 | 778 |
} |
765 | 779 |
} |
780 |
#endif |
|
766 | 781 |
free(pmap); |
767 | 782 |
l1_map[i] = NULL; |
768 | 783 |
} |
... | ... | |
773 | 788 |
|
774 | 789 |
void tlb_flush(CPUState *env) |
775 | 790 |
{ |
776 |
#if defined(TARGET_I386)
|
|
791 |
#if !defined(CONFIG_USER_ONLY)
|
|
777 | 792 |
int i; |
778 | 793 |
for(i = 0; i < CPU_TLB_SIZE; i++) { |
779 | 794 |
env->tlb_read[0][i].address = -1; |
... | ... | |
784 | 799 |
#endif |
785 | 800 |
} |
786 | 801 |
|
802 |
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr) |
|
803 |
{ |
|
804 |
if (addr == (tlb_entry->address & |
|
805 |
(TARGET_PAGE_MASK | TLB_INVALID_MASK))) |
|
806 |
tlb_entry->address = -1; |
|
807 |
} |
|
808 |
|
|
787 | 809 |
void tlb_flush_page(CPUState *env, uint32_t addr) |
788 | 810 |
{ |
789 |
#if defined(TARGET_I386) |
|
811 |
#if !defined(CONFIG_USER_ONLY) |
|
812 |
int i; |
|
813 |
|
|
814 |
addr &= TARGET_PAGE_MASK; |
|
815 |
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
|
816 |
tlb_flush_entry(&env->tlb_read[0][i], addr); |
|
817 |
tlb_flush_entry(&env->tlb_write[0][i], addr); |
|
818 |
tlb_flush_entry(&env->tlb_read[1][i], addr); |
|
819 |
tlb_flush_entry(&env->tlb_write[1][i], addr); |
|
820 |
#endif |
|
821 |
} |
|
822 |
|
|
823 |
/* make all write to page 'addr' trigger a TLB exception to detect |
|
824 |
self modifying code */ |
|
825 |
void tlb_flush_page_write(CPUState *env, uint32_t addr) |
|
826 |
{ |
|
827 |
#if !defined(CONFIG_USER_ONLY) |
|
790 | 828 |
int i; |
791 | 829 |
|
830 |
addr &= TARGET_PAGE_MASK; |
|
792 | 831 |
i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1); |
793 |
env->tlb_read[0][i].address = -1; |
|
794 |
env->tlb_write[0][i].address = -1; |
|
795 |
env->tlb_read[1][i].address = -1; |
|
796 |
env->tlb_write[1][i].address = -1; |
|
832 |
tlb_flush_entry(&env->tlb_write[0][i], addr); |
|
833 |
tlb_flush_entry(&env->tlb_write[1][i], addr); |
|
797 | 834 |
#endif |
798 | 835 |
} |
799 | 836 |
|
... | ... | |
900 | 937 |
} |
901 | 938 |
return io_index << IO_MEM_SHIFT; |
902 | 939 |
} |
940 |
|
|
941 |
#if !defined(CONFIG_USER_ONLY) |
|
942 |
|
|
943 |
#define MMUSUFFIX _cmmu |
|
944 |
#define GETPC() NULL |
|
945 |
#define env cpu_single_env |
|
946 |
|
|
947 |
#define SHIFT 0 |
|
948 |
#include "softmmu_template.h" |
|
949 |
|
|
950 |
#define SHIFT 1 |
|
951 |
#include "softmmu_template.h" |
|
952 |
|
|
953 |
#define SHIFT 2 |
|
954 |
#include "softmmu_template.h" |
|
955 |
|
|
956 |
#define SHIFT 3 |
|
957 |
#include "softmmu_template.h" |
|
958 |
|
|
959 |
#undef env |
|
960 |
|
|
961 |
#endif |
Also available in: Unified diff