Revision 61382a50
b/cpu-all.h

 #ifndef CPU_ALL_H
 #define CPU_ALL_H
 
-/* all CPU memory access use these macros */
-static inline int ldub(void *ptr)
+/* CPU memory access without any memory or io remapping */
+
+static inline int ldub_raw(void *ptr)
 {
     return *(uint8_t *)ptr;
 }
 
-static inline int ldsb(void *ptr)
+static inline int ldsb_raw(void *ptr)
 {
     return *(int8_t *)ptr;
 }
 
-static inline void stb(void *ptr, int v)
+static inline void stb_raw(void *ptr, int v)
 {
     *(uint8_t *)ptr = v;
 }
@@ ... @@
 #if defined(WORDS_BIGENDIAN) || defined(__arm__)
 
 /* conservative code for little endian unaligned accesses */
-static inline int lduw(void *ptr)
+static inline int lduw_raw(void *ptr)
 {
 #ifdef __powerpc__
     int val;
@@ ... @@
 #endif
 }
 
-static inline int ldsw(void *ptr)
+static inline int ldsw_raw(void *ptr)
 {
 #ifdef __powerpc__
     int val;
@@ ... @@
 #endif
 }
 
-static inline int ldl(void *ptr)
+static inline int ldl_raw(void *ptr)
 {
 #ifdef __powerpc__
     int val;
@@ ... @@
 #endif
 }
 
-static inline uint64_t ldq(void *ptr)
+static inline uint64_t ldq_raw(void *ptr)
 {
     uint8_t *p = ptr;
     uint32_t v1, v2;
-    v1 = ldl(p);
-    v2 = ldl(p + 4);
+    v1 = ldl_raw(p);
+    v2 = ldl_raw(p + 4);
     return v1 | ((uint64_t)v2 << 32);
 }
 
-static inline void stw(void *ptr, int v)
+static inline void stw_raw(void *ptr, int v)
 {
 #ifdef __powerpc__
     __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
@@ ... @@
 #endif
 }
 
-static inline void stl(void *ptr, int v)
+static inline void stl_raw(void *ptr, int v)
 {
 #ifdef __powerpc__
     __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
@@ ... @@
 #endif
 }
 
-static inline void stq(void *ptr, uint64_t v)
+static inline void stq_raw(void *ptr, uint64_t v)
 {
     uint8_t *p = ptr;
-    stl(p, (uint32_t)v);
-    stl(p + 4, v >> 32);
+    stl_raw(p, (uint32_t)v);
+    stl_raw(p + 4, v >> 32);
 }
 
 /* float access */
 
-static inline float ldfl(void *ptr)
+static inline float ldfl_raw(void *ptr)
 {
     union {
         float f;
         uint32_t i;
     } u;
-    u.i = ldl(ptr);
+    u.i = ldl_raw(ptr);
     return u.f;
 }
 
-static inline void stfl(void *ptr, float v)
+static inline void stfl_raw(void *ptr, float v)
 {
     union {
         float f;
         uint32_t i;
     } u;
     u.f = v;
-    stl(ptr, u.i);
+    stl_raw(ptr, u.i);
 }
 
 
 #if defined(__arm__) && !defined(WORDS_BIGENDIAN)
 
 /* NOTE: arm is horrible as double 32 bit words are stored in big endian ! */
-static inline double ldfq(void *ptr)
+static inline double ldfq_raw(void *ptr)
 {
     union {
         double d;
         uint32_t tab[2];
     } u;
-    u.tab[1] = ldl(ptr);
-    u.tab[0] = ldl(ptr + 4);
+    u.tab[1] = ldl_raw(ptr);
+    u.tab[0] = ldl_raw(ptr + 4);
     return u.d;
 }
 
-static inline void stfq(void *ptr, double v)
+static inline void stfq_raw(void *ptr, double v)
 {
     union {
         double d;
         uint32_t tab[2];
     } u;
     u.d = v;
-    stl(ptr, u.tab[1]);
-    stl(ptr + 4, u.tab[0]);
+    stl_raw(ptr, u.tab[1]);
+    stl_raw(ptr + 4, u.tab[0]);
 }
 
 #else
-static inline double ldfq(void *ptr)
+static inline double ldfq_raw(void *ptr)
 {
     union {
         double d;
         uint64_t i;
     } u;
-    u.i = ldq(ptr);
+    u.i = ldq_raw(ptr);
     return u.d;
 }
 
-static inline void stfq(void *ptr, double v)
+static inline void stfq_raw(void *ptr, double v)
 {
     union {
         double d;
         uint64_t i;
     } u;
     u.d = v;
-    stq(ptr, u.i);
+    stq_raw(ptr, u.i);
 }
 #endif
 
 #elif defined(TARGET_WORDS_BIGENDIAN) && !defined(WORDS_BIGENDIAN)
 
-static inline int lduw(void *ptr)
+static inline int lduw_raw(void *ptr)
 {
     uint8_t *b = (uint8_t *) ptr;
     return (b[0]<<8|b[1]);
 }
 
-static inline int ldsw(void *ptr)
+static inline int ldsw_raw(void *ptr)
 {
     int8_t *b = (int8_t *) ptr;
     return (b[0]<<8|b[1]);
 }
 
-static inline int ldl(void *ptr)
+static inline int ldl_raw(void *ptr)
 {
     uint8_t *b = (uint8_t *) ptr;
     return (b[0]<<24|b[1]<<16|b[2]<<8|b[3]);
 }
 
-static inline uint64_t ldq(void *ptr)
+static inline uint64_t ldq_raw(void *ptr)
 {
     uint32_t a,b;
     a = ldl (ptr);
@@ ... @@
     return (((uint64_t)a<<32)|b);
 }
 
-static inline void stw(void *ptr, int v)
+static inline void stw_raw(void *ptr, int v)
 {
     uint8_t *d = (uint8_t *) ptr;
     d[0] = v >> 8;
     d[1] = v;
 }
 
-static inline void stl(void *ptr, int v)
+static inline void stl_raw(void *ptr, int v)
 {
     uint8_t *d = (uint8_t *) ptr;
     d[0] = v >> 24;
@@ ... @@
     d[3] = v;
 }
 
-static inline void stq(void *ptr, uint64_t v)
+static inline void stq_raw(void *ptr, uint64_t v)
 {
     stl (ptr, v);
     stl (ptr+4, v >> 32);
@@ ... @@
 
 #else
 
-static inline int lduw(void *ptr)
+static inline int lduw_raw(void *ptr)
 {
     return *(uint16_t *)ptr;
 }
 
-static inline int ldsw(void *ptr)
+static inline int ldsw_raw(void *ptr)
 {
     return *(int16_t *)ptr;
 }
 
-static inline int ldl(void *ptr)
+static inline int ldl_raw(void *ptr)
 {
     return *(uint32_t *)ptr;
 }
 
-static inline uint64_t ldq(void *ptr)
+static inline uint64_t ldq_raw(void *ptr)
 {
     return *(uint64_t *)ptr;
 }
 
-static inline void stw(void *ptr, int v)
+static inline void stw_raw(void *ptr, int v)
 {
     *(uint16_t *)ptr = v;
 }
 
-static inline void stl(void *ptr, int v)
+static inline void stl_raw(void *ptr, int v)
 {
     *(uint32_t *)ptr = v;
 }
 
-static inline void stq(void *ptr, uint64_t v)
+static inline void stq_raw(void *ptr, uint64_t v)
 {
     *(uint64_t *)ptr = v;
 }
 
 /* float access */
 
-static inline float ldfl(void *ptr)
+static inline float ldfl_raw(void *ptr)
 {
     return *(float *)ptr;
 }
 
-static inline double ldfq(void *ptr)
+static inline double ldfq_raw(void *ptr)
 {
     return *(double *)ptr;
 }
 
-static inline void stfl(void *ptr, float v)
+static inline void stfl_raw(void *ptr, float v)
 {
     *(float *)ptr = v;
 }
 
-static inline void stfq(void *ptr, double v)
+static inline void stfq_raw(void *ptr, double v)
 {
     *(double *)ptr = v;
 }
 #endif
 
+/* MMU memory access macros */
+
+#if defined(CONFIG_USER_ONLY)
+
+/* if user mode, no other memory access functions */
+#define ldub(p) ldub_raw(p)
+#define ldsb(p) ldsb_raw(p)
+#define lduw(p) lduw_raw(p)
+#define ldsw(p) ldsw_raw(p)
+#define ldl(p) ldl_raw(p)
+#define ldq(p) ldq_raw(p)
+#define ldfl(p) ldfl_raw(p)
+#define ldfq(p) ldfq_raw(p)
+#define stb(p, v) stb_raw(p, v)
+#define stw(p, v) stw_raw(p, v)
+#define stl(p, v) stl_raw(p, v)
+#define stq(p, v) stq_raw(p, v)
+#define stfl(p, v) stfl_raw(p, v)
+#define stfq(p, v) stfq_raw(p, v)
+
+#define ldub_code(p) ldub_raw(p)
+#define ldsb_code(p) ldsb_raw(p)
+#define lduw_code(p) lduw_raw(p)
+#define ldsw_code(p) ldsw_raw(p)
+#define ldl_code(p) ldl_raw(p)
+
+#define ldub_kernel(p) ldub_raw(p)
+#define ldsb_kernel(p) ldsb_raw(p)
+#define lduw_kernel(p) lduw_raw(p)
+#define ldsw_kernel(p) ldsw_raw(p)
+#define ldl_kernel(p) ldl_raw(p)
+#define stb_kernel(p, v) stb_raw(p, v)
+#define stw_kernel(p, v) stw_raw(p, v)
+#define stl_kernel(p, v) stl_raw(p, v)
+#define stq_kernel(p, v) stq_raw(p, v)
+
+#endif /* defined(CONFIG_USER_ONLY) */
+
 /* page related stuff */
 
 #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
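Note on the renaming above: the `_raw` family always dereferences a host pointer directly, and the unsuffixed names are re-established afterwards, either as trivial aliases in user-only builds or as soft-MMU accessors elsewhere. For context, the byte-assembling variants used when target and host byte order differ can be exercised on their own. The following standalone snippet is my own illustration (the function name and test bytes are invented, it is not part of the patch); it mirrors the shape of the `TARGET_WORDS_BIGENDIAN` `ldl_raw` above.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assemble a big-endian 32 bit guest word byte by byte, independent of the
   host's own byte order (same shape as the TARGET_WORDS_BIGENDIAN ldl_raw). */
static int ldl_be_raw(void *ptr)
{
    uint8_t *b = (uint8_t *)ptr;
    return (b[0] << 24 | b[1] << 16 | b[2] << 8 | b[3]);
}

int main(void)
{
    uint8_t guest_mem[4] = { 0x12, 0x34, 0x56, 0x78 };  /* invented bytes */
    assert(ldl_be_raw(guest_mem) == 0x12345678);        /* host integer value */
    printf("0x%08x\n", ldl_be_raw(guest_mem));
    return 0;
}
```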
b/exec.c

         prot = 0;
         for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
             prot |= page_get_flags(addr);
+#if !defined(CONFIG_SOFTMMU)
         mprotect((void *)host_start, host_page_size,
                  (prot & PAGE_BITS) & ~PAGE_WRITE);
+#endif
+#if !defined(CONFIG_USER_ONLY)
+        /* suppress soft TLB */
+        /* XXX: must flush on all processor with same address space */
+        tlb_flush_page_write(cpu_single_env, host_start);
+#endif
 #ifdef DEBUG_TB_INVALIDATE
         printf("protecting code page: 0x%08lx\n",
                host_start);
 #endif
         p->flags &= ~PAGE_WRITE;
-#ifdef DEBUG_TB_CHECK
-        tb_page_check();
-#endif
     }
 }
 
@@ ... @@
     if (page_index2 != page_index1) {
         tb_alloc_page(tb, page_index2);
     }
+#ifdef DEBUG_TB_CHECK
+    tb_page_check();
+#endif
     tb->jmp_first = (TranslationBlock *)((long)tb | 2);
     tb->jmp_next[0] = NULL;
     tb->jmp_next[1] = NULL;
@@ ... @@
     /* if the page was really writable, then we change its
        protection back to writable */
     if (prot & PAGE_WRITE_ORG) {
-        mprotect((void *)host_start, host_page_size,
-                 (prot & PAGE_BITS) | PAGE_WRITE);
         pindex = (address - host_start) >> TARGET_PAGE_BITS;
-        p1[pindex].flags |= PAGE_WRITE;
-        /* and since the content will be modified, we must invalidate
-           the corresponding translated code. */
-        tb_invalidate_page(address);
+        if (!(p1[pindex].flags & PAGE_WRITE)) {
+#if !defined(CONFIG_SOFTMMU)
+            mprotect((void *)host_start, host_page_size,
+                     (prot & PAGE_BITS) | PAGE_WRITE);
+#endif
+            p1[pindex].flags |= PAGE_WRITE;
+            /* and since the content will be modified, we must invalidate
+               the corresponding translated code. */
+            tb_invalidate_page(address);
 #ifdef DEBUG_TB_CHECK
-        tb_invalidate_check(address);
+            tb_invalidate_check(address);
 #endif
-        return 1;
-    } else {
-        return 0;
+            return 1;
+        }
     }
+    return 0;
 }
 
 /* call this function when system calls directly modify a memory area */
@@ ... @@
 /* unmap all maped pages and flush all associated code */
 void page_unmap(void)
 {
-    PageDesc *p, *pmap;
-    unsigned long addr;
-    int i, j, ret, j1;
+    PageDesc *pmap;
+    int i;
 
     for(i = 0; i < L1_SIZE; i++) {
         pmap = l1_map[i];
         if (pmap) {
+#if !defined(CONFIG_SOFTMMU)
+            PageDesc *p;
+            unsigned long addr;
+            int j, ret, j1;
+
             p = pmap;
             for(j = 0;j < L2_SIZE;) {
                 if (p->flags & PAGE_VALID) {
@@ ... @@
                     j++;
                 }
             }
+#endif
             free(pmap);
             l1_map[i] = NULL;
         }
@@ ... @@
 
 void tlb_flush(CPUState *env)
 {
-#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
     int i;
     for(i = 0; i < CPU_TLB_SIZE; i++) {
         env->tlb_read[0][i].address = -1;
@@ ... @@
 #endif
 }
 
+static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
+{
+    if (addr == (tlb_entry->address &
+                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
+        tlb_entry->address = -1;
+}
+
 void tlb_flush_page(CPUState *env, uint32_t addr)
 {
-#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
+    int i;
+
+    addr &= TARGET_PAGE_MASK;
+    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
+    tlb_flush_entry(&env->tlb_read[0][i], addr);
+    tlb_flush_entry(&env->tlb_write[0][i], addr);
+    tlb_flush_entry(&env->tlb_read[1][i], addr);
+    tlb_flush_entry(&env->tlb_write[1][i], addr);
+#endif
+}
+
+/* make all write to page 'addr' trigger a TLB exception to detect
+   self modifying code */
+void tlb_flush_page_write(CPUState *env, uint32_t addr)
+{
+#if !defined(CONFIG_USER_ONLY)
     int i;
 
+    addr &= TARGET_PAGE_MASK;
     i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    env->tlb_read[0][i].address = -1;
-    env->tlb_write[0][i].address = -1;
-    env->tlb_read[1][i].address = -1;
-    env->tlb_write[1][i].address = -1;
+    tlb_flush_entry(&env->tlb_write[0][i], addr);
+    tlb_flush_entry(&env->tlb_write[1][i], addr);
 #endif
 }
 
@@ ... @@
     }
     return io_index << IO_MEM_SHIFT;
 }
+
+#if !defined(CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _cmmu
+#define GETPC() NULL
+#define env cpu_single_env
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+#undef env
+
+#endif
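The new `tlb_flush_entry()` helper only drops a TLB slot when its tag actually matches the page being flushed, which is what lets `tlb_flush_page()` and `tlb_flush_page_write()` touch just one index instead of clearing the whole table. Below is a standalone sketch of that comparison; the struct layout, the `TLB_INVALID_MASK` value and the addresses are simplified stand-ins of my own, not the QEMU definitions.

```c
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~((1u << TARGET_PAGE_BITS) - 1))
#define TLB_INVALID_MASK (1u << 3)   /* placeholder flag bit, not QEMU's value */

typedef struct {
    uint32_t address;        /* page tag plus flag bits */
    unsigned long addend;    /* host - guest offset used by the fast path */
} CPUTLBEntry;

/* Mirrors the helper added to exec.c: invalidate only on an exact page match. */
static void tlb_flush_entry(CPUTLBEntry *tlb_entry, uint32_t addr)
{
    if (addr == (tlb_entry->address & (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = (uint32_t)-1;
}

int main(void)
{
    CPUTLBEntry e = { .address = 0x00402000, .addend = 0 };
    tlb_flush_entry(&e, 0x00401000);   /* different page: entry survives */
    printf("after miss:  0x%08x\n", (unsigned)e.address);
    tlb_flush_entry(&e, 0x00402000);   /* matching page: entry is dropped */
    printf("after match: 0x%08x\n", (unsigned)e.address);
    return 0;
}
```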
b/hw/vga_template.h

 
     w = width;
     do {
-        v = lduw((void *)s);
+        v = lduw_raw((void *)s);
         r = (v >> 7) & 0xf8;
         g = (v >> 2) & 0xf8;
         b = (v << 3) & 0xf8;
@@ ... @@
 
     w = width;
     do {
-        v = lduw((void *)s);
+        v = lduw_raw((void *)s);
         r = (v >> 8) & 0xf8;
         g = (v >> 3) & 0xfc;
         b = (v << 3) & 0xf8;
b/softmmu_header.h

  */
 #if DATA_SIZE == 8
 #define SUFFIX q
+#define USUFFIX q
 #define DATA_TYPE uint64_t
 #elif DATA_SIZE == 4
 #define SUFFIX l
+#define USUFFIX l
 #define DATA_TYPE uint32_t
 #elif DATA_SIZE == 2
 #define SUFFIX w
+#define USUFFIX uw
 #define DATA_TYPE uint16_t
 #define DATA_STYPE int16_t
 #elif DATA_SIZE == 1
 #define SUFFIX b
+#define USUFFIX ub
 #define DATA_TYPE uint8_t
 #define DATA_STYPE int8_t
 #else
 #error unsupported data size
 #endif
 
-#if MEMUSER == 0
-#define MEMSUFFIX _kernel
+#if ACCESS_TYPE == 0
+
+#define CPU_MEM_INDEX 0
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == 1
+
+#define CPU_MEM_INDEX 1
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == 2
+
+#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
+#define MMUSUFFIX _mmu
+
+#elif ACCESS_TYPE == 3
+
+#define CPU_MEM_INDEX ((env->hflags & HF_CPL_MASK) == 3)
+#define MMUSUFFIX _cmmu
+
 #else
-#define MEMSUFFIX _user
+#error invalid ACCESS_TYPE
 #endif
 
 #if DATA_SIZE == 8
@@ ... @@
 #endif
 
 
-#if MEMUSER == 0
-DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr);
-void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE v);
-#endif
+DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                         int is_user);
+void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr, DATA_TYPE v, int is_user);
 
-static inline int glue(glue(ldu, SUFFIX), MEMSUFFIX)(void *ptr)
+static inline int glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
 {
     int index;
     RES_TYPE res;
     unsigned long addr, physaddr;
+    int is_user;
+
     addr = (unsigned long)ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    if (__builtin_expect(env->tlb_read[MEMUSER][index].address !=
+    is_user = CPU_MEM_INDEX;
+    if (__builtin_expect(env->tlb_read[is_user][index].address !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        res = glue(glue(__ld, SUFFIX), _mmu)(addr);
+        res = glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
     } else {
-        physaddr = addr + env->tlb_read[MEMUSER][index].addend;
-        res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
+        physaddr = addr + env->tlb_read[is_user][index].addend;
+        res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
 }
@@ ... @@
 {
     int res, index;
     unsigned long addr, physaddr;
+    int is_user;
+
     addr = (unsigned long)ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    if (__builtin_expect(env->tlb_read[MEMUSER][index].address !=
+    is_user = CPU_MEM_INDEX;
+    if (__builtin_expect(env->tlb_read[is_user][index].address !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), _mmu)(addr);
+        res = (DATA_STYPE)glue(glue(__ld, SUFFIX), MMUSUFFIX)(addr, is_user);
     } else {
-        physaddr = addr + env->tlb_read[MEMUSER][index].addend;
+        physaddr = addr + env->tlb_read[is_user][index].addend;
         res = glue(glue(lds, SUFFIX), _raw)((uint8_t *)physaddr);
     }
     return res;
@@ ... @@
 {
     int index;
     unsigned long addr, physaddr;
+    int is_user;
+
     addr = (unsigned long)ptr;
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    if (__builtin_expect(env->tlb_write[MEMUSER][index].address !=
+    is_user = CPU_MEM_INDEX;
+    if (__builtin_expect(env->tlb_write[is_user][index].address !=
                          (addr & (TARGET_PAGE_MASK | (DATA_SIZE - 1))), 0)) {
-        glue(glue(__st, SUFFIX), _mmu)(addr, v);
+        glue(glue(__st, SUFFIX), MMUSUFFIX)(addr, v, is_user);
     } else {
-        physaddr = addr + env->tlb_write[MEMUSER][index].addend;
+        physaddr = addr + env->tlb_write[is_user][index].addend;
         glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, v);
     }
 }
@@ ... @@
 #undef DATA_TYPE
 #undef DATA_STYPE
 #undef SUFFIX
+#undef USUFFIX
 #undef DATA_SIZE
-#undef MEMSUFFIX
+#undef CPU_MEM_INDEX
+#undef MMUSUFFIX
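Each inclusion of this header therefore expands into a small inline accessor whose fast path is one TLB tag compare plus an addend, and whose slow path is the out-of-line `__ld`/`__st` MMU call selected by `MMUSUFFIX`. The following is a de-macro-ified, standalone sketch of that control flow for a 4-byte load; the stub types, the identity host mapping and the `slow_ldl_mmu` stand-in are my own assumptions for illustration, not the generated QEMU code.

```c
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_MASK (~((1ul << TARGET_PAGE_BITS) - 1))
#define CPU_TLB_SIZE 256

typedef struct { unsigned long address; unsigned long addend; } TLBEntry;
static TLBEntry tlb_read[2][CPU_TLB_SIZE];          /* [is_user][index] */

static uint32_t slow_ldl_mmu(unsigned long addr, int is_user)
{
    /* stand-in for the out-of-line __ldl_mmu: walk page tables, refill... */
    (void)addr; (void)is_user;
    return 0xdeadbeef;
}

static uint32_t ldl_sketch(unsigned long addr, int is_user)
{
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    if (tlb_read[is_user][index].address !=
        (addr & (TARGET_PAGE_MASK | (4 - 1)))) {
        return slow_ldl_mmu(addr, is_user);         /* miss, unaligned or IO */
    } else {
        unsigned long host = addr + tlb_read[is_user][index].addend;
        return *(uint32_t *)host;                   /* ldl_raw on the host pointer */
    }
}

int main(void)
{
    uint32_t word = 0x11223344;
    unsigned long guest = (unsigned long)&word;     /* pretend identity mapping */
    int index = (guest >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_read[0][index].address = guest & TARGET_PAGE_MASK;
    tlb_read[0][index].addend = 0;                  /* host address == guest address */
    printf("0x%08x\n", ldl_sketch(guest, 0));       /* hits the fast path */
    return 0;
}
```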
b/softmmu_template.h

 
 #if DATA_SIZE == 8
 #define SUFFIX q
+#define USUFFIX q
 #define DATA_TYPE uint64_t
 #elif DATA_SIZE == 4
 #define SUFFIX l
+#define USUFFIX l
 #define DATA_TYPE uint32_t
 #elif DATA_SIZE == 2
 #define SUFFIX w
+#define USUFFIX uw
 #define DATA_TYPE uint16_t
 #elif DATA_SIZE == 1
 #define SUFFIX b
+#define USUFFIX ub
 #define DATA_TYPE uint8_t
 #else
 #error unsupported data size
 #endif
 
-static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr);
-static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
-                                  void *retaddr);
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                        int is_user,
+                                                        void *retaddr);
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                   DATA_TYPE val,
+                                                   int is_user,
+                                                   void *retaddr);
 
 static inline DATA_TYPE glue(io_read, SUFFIX)(unsigned long physaddr,
                                               unsigned long tlb_addr)
@@ ... @@
 }
 
 /* handle all cases except unaligned access which span two pages */
-DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
+DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                         int is_user)
 {
     DATA_TYPE res;
-    int is_user, index;
+    int index;
     unsigned long physaddr, tlb_addr;
     void *retaddr;
 
     /* test if there is match for unaligned or IO access */
     /* XXX: could done more in memory macro in a non portable way */
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ ... @@
         } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
             /* slow unaligned access (it spans two pages or IO) */
         do_unaligned_access:
-            retaddr = __builtin_return_address(0);
-            res = glue(slow_ld, SUFFIX)(addr, retaddr);
+            retaddr = GETPC();
+            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
+                                                         is_user, retaddr);
         } else {
             /* unaligned access in the same page */
-            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = __builtin_return_address(0);
-        tlb_fill(addr, 0, retaddr);
+        retaddr = GETPC();
+        tlb_fill(addr, 0, is_user, retaddr);
         goto redo;
     }
     return res;
 }
 
 /* handle all unaligned cases */
-static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
+static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                        int is_user,
+                                                        void *retaddr)
 {
     DATA_TYPE res, res1, res2;
-    int is_user, index, shift;
+    int index, shift;
     unsigned long physaddr, tlb_addr, addr1, addr2;
 
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ ... @@
             /* slow unaligned access (it spans two pages) */
             addr1 = addr & ~(DATA_SIZE - 1);
             addr2 = addr1 + DATA_SIZE;
-            res1 = glue(slow_ld, SUFFIX)(addr1, retaddr);
-            res2 = glue(slow_ld, SUFFIX)(addr2, retaddr);
+            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
+                                                          is_user, retaddr);
+            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
+                                                          is_user, retaddr);
             shift = (addr & (DATA_SIZE - 1)) * 8;
 #ifdef TARGET_WORDS_BIGENDIAN
             res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
@@ ... @@
 #endif
         } else {
             /* unaligned/aligned access in the same page */
-            res = glue(glue(ldu, SUFFIX), _raw)((uint8_t *)physaddr);
+            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)physaddr);
         }
     } else {
         /* the page is not in the TLB : fill it */
-        tlb_fill(addr, 0, retaddr);
+        tlb_fill(addr, 0, is_user, retaddr);
         goto redo;
     }
     return res;
 }
 
 
-void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val)
+void REGPARM(2) glue(glue(__st, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                    DATA_TYPE val,
+                                                    int is_user)
 {
     unsigned long physaddr, tlb_addr;
     void *retaddr;
-    int is_user, index;
+    int index;
 
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_write[is_user][index].address;
@@ ... @@
             glue(io_write, SUFFIX)(physaddr, val, tlb_addr);
         } else if (((addr & 0xfff) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
         do_unaligned_access:
-            retaddr = __builtin_return_address(0);
-            glue(slow_st, SUFFIX)(addr, val, retaddr);
+            retaddr = GETPC();
+            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
+                                                   is_user, retaddr);
         } else {
             /* aligned/unaligned access in the same page */
             glue(glue(st, SUFFIX), _raw)((uint8_t *)physaddr, val);
         }
     } else {
         /* the page is not in the TLB : fill it */
-        retaddr = __builtin_return_address(0);
-        tlb_fill(addr, 1, retaddr);
+        retaddr = GETPC();
+        tlb_fill(addr, 1, is_user, retaddr);
         goto redo;
     }
 }
 
 /* handles all unaligned cases */
-static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
-                                  void *retaddr)
+static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(unsigned long addr,
+                                                   DATA_TYPE val,
+                                                   int is_user,
+                                                   void *retaddr)
 {
     unsigned long physaddr, tlb_addr;
-    int is_user, index, i;
+    int index, i;
 
-    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_write[is_user][index].address;
@@ ... @@
             /* XXX: not efficient, but simple */
             for(i = 0;i < DATA_SIZE; i++) {
 #ifdef TARGET_WORDS_BIGENDIAN
-                slow_stb(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)), retaddr);
+                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
+                                          is_user, retaddr);
 #else
-                slow_stb(addr + i, val >> (i * 8), retaddr);
+                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
+                                          is_user, retaddr);
 #endif
             }
         } else {
@@ ... @@
         }
     } else {
         /* the page is not in the TLB : fill it */
-        tlb_fill(addr, 1, retaddr);
+        tlb_fill(addr, 1, is_user, retaddr);
         goto redo;
     }
 }
@@ ... @@
 #undef SHIFT
 #undef DATA_TYPE
 #undef SUFFIX
+#undef USUFFIX
 #undef DATA_SIZE
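The slow load path splits an access that crosses a page boundary into two aligned loads and recombines them with the shift arithmetic kept above. A worked standalone example of the big-endian case shown in the hunk (the byte values are invented for illustration):

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Big-endian guest bytes 0x00 0x11 0x22 0x33 0x44 0x55 0x66 0x77;
       an unaligned 4-byte load at offset 2 should yield 0x22334455. */
    uint32_t res1 = 0x00112233;        /* aligned word containing the first bytes */
    uint32_t res2 = 0x44556677;        /* following aligned word */
    int shift = (2 & 3) * 8;           /* (addr & (DATA_SIZE - 1)) * 8 = 16 */

    uint32_t res = (res1 << shift) | (res2 >> (32 - shift));
    assert(res == 0x22334455);
    printf("0x%08x\n", res);
    return 0;
}
```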
b/target-i386/exec.h

 void cpu_x86_update_cr0(CPUX86State *env);
 void cpu_x86_update_cr3(CPUX86State *env);
 void cpu_x86_flush_tlb(CPUX86State *env, uint32_t addr);
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write);
-void tlb_fill(unsigned long addr, int is_write, void *retaddr);
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
+                             int is_write, int is_user, int is_softmmu);
+void tlb_fill(unsigned long addr, int is_write, int is_user,
+              void *retaddr);
 void __hidden cpu_lock(void);
 void __hidden cpu_unlock(void);
 void do_interrupt(int intno, int is_int, int error_code,
@@ ... @@
         (eflags & update_mask);
 }
 
-/* memory access macros */
+/* XXX: move that to a generic header */
+#if !defined(CONFIG_USER_ONLY)
 
-#define ldul ldl
-#define lduq ldq
 #define ldul_user ldl_user
 #define ldul_kernel ldl_kernel
 
-#define ldub_raw ldub
-#define ldsb_raw ldsb
-#define lduw_raw lduw
-#define ldsw_raw ldsw
-#define ldl_raw ldl
-#define ldq_raw ldq
+#define ACCESS_TYPE 0
+#define MEMSUFFIX _kernel
+#define DATA_SIZE 1
+#include "softmmu_header.h"
+
+#define DATA_SIZE 2
+#include "softmmu_header.h"
 
-#define stb_raw stb
-#define stw_raw stw
-#define stl_raw stl
-#define stq_raw stq
+#define DATA_SIZE 4
+#include "softmmu_header.h"
+
+#define DATA_SIZE 8
+#include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
 
-#define MEMUSER 0
+#define ACCESS_TYPE 1
+#define MEMSUFFIX _user
 #define DATA_SIZE 1
 #include "softmmu_header.h"
 
@@ ... @@
 
 #define DATA_SIZE 8
 #include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
 
-#undef MEMUSER
-#define MEMUSER 1
+/* these access are slower, they must be as rare as possible */
+#define ACCESS_TYPE 2
+#define MEMSUFFIX _data
 #define DATA_SIZE 1
 #include "softmmu_header.h"
 
@@ ... @@
 
 #define DATA_SIZE 8
 #include "softmmu_header.h"
+#undef ACCESS_TYPE
+#undef MEMSUFFIX
+
+#define ldub(p) ldub_data(p)
+#define ldsb(p) ldsb_data(p)
+#define lduw(p) lduw_data(p)
+#define ldsw(p) ldsw_data(p)
+#define ldl(p) ldl_data(p)
+#define ldq(p) ldq_data(p)
+
+#define stb(p, v) stb_data(p, v)
+#define stw(p, v) stw_data(p, v)
+#define stl(p, v) stl_data(p, v)
+#define stq(p, v) stq_data(p, v)
+
+static inline double ldfq(void *ptr)
+{
+    union {
+        double d;
+        uint64_t i;
+    } u;
+    u.i = ldq(ptr);
+    return u.d;
+}
+
+static inline void stfq(void *ptr, double v)
+{
+    union {
+        double d;
+        uint64_t i;
+    } u;
+    u.d = v;
+    stq(ptr, u.i);
+}
 
-#undef MEMUSER
+static inline float ldfl(void *ptr)
+{
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.i = ldl(ptr);
+    return u.f;
+}
+
+static inline void stfl(void *ptr, float v)
+{
+    union {
+        float f;
+        uint32_t i;
+    } u;
+    u.f = v;
+    stl(ptr, u.i);
+}
 
+#endif /* !defined(CONFIG_USER_ONLY) */
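All of the `ld*_kernel`, `ld*_user` and `ld*_data` accessors generated here come out of the same `glue()` token pasting that softmmu_header.h relies on. The following standalone illustration shows just that mechanism; the `glue`/`xglue` pair mirrors the usual QEMU-style definition, and everything else (the function body, the value 42) is invented for the example.

```c
#include <stdio.h>

#define xglue(x, y) x ## y
#define glue(x, y)  xglue(x, y)

#define USUFFIX   l
#define MEMSUFFIX _kernel

/* Expands to "static int ldl_kernel(void *ptr)". */
static int glue(glue(ld, USUFFIX), MEMSUFFIX)(void *ptr)
{
    return *(int *)ptr;
}

int main(void)
{
    int x = 42;
    printf("%d\n", ldl_kernel(&x));   /* the pasted name is an ordinary function */
    return 0;
}
```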
b/target-i386/helper.c

     if (index + (4 << shift) - 1 > env->tr.limit)
         raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
     if (shift == 0) {
-        *esp_ptr = lduw(env->tr.base + index);
-        *ss_ptr = lduw(env->tr.base + index + 2);
+        *esp_ptr = lduw_kernel(env->tr.base + index);
+        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
     } else {
-        *esp_ptr = ldl(env->tr.base + index);
-        *ss_ptr = lduw(env->tr.base + index + 4);
+        *esp_ptr = ldl_kernel(env->tr.base + index);
+        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
     }
 }
 
@@ ... @@
     if ((index + 7) > dt->limit)
         return -1;
     ptr = dt->base + index;
-    *e1_ptr = ldl(ptr);
-    *e2_ptr = ldl(ptr + 4);
+    *e1_ptr = ldl_kernel(ptr);
+    *e2_ptr = ldl_kernel(ptr + 4);
     return 0;
 }
 
@@ ... @@
     if (intno * 8 + 7 > dt->limit)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     ptr = dt->base + intno * 8;
-    e1 = ldl(ptr);
-    e2 = ldl(ptr + 4);
+    e1 = ldl_kernel(ptr);
+    e2 = ldl_kernel(ptr + 4);
     /* check gate type */
     type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
     switch(type) {
@@ ... @@
         int old_eflags;
         if (env->eflags & VM_MASK) {
             ssp -= 4;
-            stl(ssp, env->segs[R_GS].selector);
+            stl_kernel(ssp, env->segs[R_GS].selector);
             ssp -= 4;
-            stl(ssp, env->segs[R_FS].selector);
+            stl_kernel(ssp, env->segs[R_FS].selector);
             ssp -= 4;
-            stl(ssp, env->segs[R_DS].selector);
+            stl_kernel(ssp, env->segs[R_DS].selector);
             ssp -= 4;
-            stl(ssp, env->segs[R_ES].selector);
+            stl_kernel(ssp, env->segs[R_ES].selector);
         }
         if (new_stack) {
             ssp -= 4;
-            stl(ssp, old_ss);
+            stl_kernel(ssp, old_ss);
             ssp -= 4;
-            stl(ssp, old_esp);
+            stl_kernel(ssp, old_esp);
         }
         ssp -= 4;
         old_eflags = compute_eflags();
-        stl(ssp, old_eflags);
+        stl_kernel(ssp, old_eflags);
         ssp -= 4;
-        stl(ssp, old_cs);
+        stl_kernel(ssp, old_cs);
         ssp -= 4;
-        stl(ssp, old_eip);
+        stl_kernel(ssp, old_eip);
         if (has_error_code) {
             ssp -= 4;
-            stl(ssp, error_code);
+            stl_kernel(ssp, error_code);
         }
     } else {
         if (new_stack) {
             ssp -= 2;
-            stw(ssp, old_ss);
+            stw_kernel(ssp, old_ss);
             ssp -= 2;
-            stw(ssp, old_esp);
+            stw_kernel(ssp, old_esp);
         }
         ssp -= 2;
-        stw(ssp, compute_eflags());
+        stw_kernel(ssp, compute_eflags());
         ssp -= 2;
-        stw(ssp, old_cs);
+        stw_kernel(ssp, old_cs);
         ssp -= 2;
-        stw(ssp, old_eip);
+        stw_kernel(ssp, old_eip);
         if (has_error_code) {
             ssp -= 2;
-            stw(ssp, error_code);
+            stw_kernel(ssp, error_code);
         }
     }
 
@@ ... @@
     if (intno * 4 + 3 > dt->limit)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     ptr = dt->base + intno * 4;
-    offset = lduw(ptr);
-    selector = lduw(ptr + 2);
+    offset = lduw_kernel(ptr);
+    selector = lduw_kernel(ptr + 2);
     esp = ESP;
     ssp = env->segs[R_SS].base;
     if (is_int)
@@ ... @@
     old_eip = env->eip;
     old_cs = env->segs[R_CS].selector;
     esp -= 2;
-    stw(ssp + (esp & 0xffff), compute_eflags());
+    stw_kernel(ssp + (esp & 0xffff), compute_eflags());
     esp -= 2;
-    stw(ssp + (esp & 0xffff), old_cs);
+    stw_kernel(ssp + (esp & 0xffff), old_cs);
     esp -= 2;
-    stw(ssp + (esp & 0xffff), old_eip);
+    stw_kernel(ssp + (esp & 0xffff), old_eip);
 
     /* update processor state */
     ESP = (ESP & ~0xffff) | (esp & 0xffff);
@@ ... @@
 
     dt = &env->idt;
     ptr = dt->base + (intno * 8);
-    e2 = ldl(ptr + 4);
+    e2 = ldl_kernel(ptr + 4);
 
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
     cpl = env->hflags & HF_CPL_MASK;
@@ ... @@
     if ((index + 7) > dt->limit)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     ptr = dt->base + index;
-    e1 = ldl(ptr);
-    e2 = ldl(ptr + 4);
+    e1 = ldl_kernel(ptr);
+    e2 = ldl_kernel(ptr + 4);
     if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     if (!(e2 & DESC_P_MASK))
@@ ... @@
         if ((index + 7) > dt->limit)
             raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
         ptr = dt->base + index;
-        e1 = ldl(ptr);
-        e2 = ldl(ptr + 4);
+        e1 = ldl_kernel(ptr);
+        e2 = ldl_kernel(ptr + 4);
         type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
         if ((e2 & DESC_S_MASK) ||
             (type != 2 && type != 9))
@@ ... @@
             raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
         load_seg_cache_raw_dt(&env->tr, e1, e2);
         e2 |= 0x00000200; /* set the busy bit */
-        stl(ptr + 4, e2);
+        stl_kernel(ptr + 4, e2);
     }
     env->tr.selector = selector;
 }
@@ ... @@
     ssp = env->segs[R_SS].base;
     if (shift) {
         esp -= 4;
-        stl(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        stl_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
         esp -= 4;
-        stl(ssp + (esp & esp_mask), next_eip);
+        stl_kernel(ssp + (esp & esp_mask), next_eip);
     } else {
         esp -= 2;
-        stw(ssp + (esp & esp_mask), env->segs[R_CS].selector);
+        stw_kernel(ssp + (esp & esp_mask), env->segs[R_CS].selector);
         esp -= 2;
-        stw(ssp + (esp & esp_mask), next_eip);
+        stw_kernel(ssp + (esp & esp_mask), next_eip);
     }
 
     if (!(env->segs[R_SS].flags & DESC_B_MASK))
@@ ... @@
     ssp = env->segs[R_SS].base + sp;
     if (shift) {
         ssp -= 4;
-        stl(ssp, env->segs[R_CS].selector);
+        stl_kernel(ssp, env->segs[R_CS].selector);
         ssp -= 4;
-        stl(ssp, next_eip);
+        stl_kernel(ssp, next_eip);
     } else {
         ssp -= 2;
-        stw(ssp, env->segs[R_CS].selector);
+        stw_kernel(ssp, env->segs[R_CS].selector);
         ssp -= 2;
-        stw(ssp, next_eip);
+        stw_kernel(ssp, next_eip);
     }
     sp -= (4 << shift);
 
@@ ... @@
         ssp = env->segs[R_SS].base + sp;
         if (shift) {
             ssp -= 4;
-            stl(ssp, old_ss);
+            stl_kernel(ssp, old_ss);
             ssp -= 4;
-            stl(ssp, old_esp);
+            stl_kernel(ssp, old_esp);
             ssp -= 4 * param_count;
             for(i = 0; i < param_count; i++) {
-                val = ldl(old_ssp + i * 4);
-                stl(ssp + i * 4, val);
+                val = ldl_kernel(old_ssp + i * 4);
+                stl_kernel(ssp + i * 4, val);
             }
         } else {
             ssp -= 2;
-            stw(ssp, old_ss);
+            stw_kernel(ssp, old_ss);
             ssp -= 2;
-            stw(ssp, old_esp);
+            stw_kernel(ssp, old_esp);
             ssp -= 2 * param_count;
             for(i = 0; i < param_count; i++) {
-                val = lduw(old_ssp + i * 2);
-                stw(ssp + i * 2, val);
+                val = lduw_kernel(old_ssp + i * 2);
+                stw_kernel(ssp + i * 2, val);
             }
         }
     } else {
@@ ... @@
 
         if (shift) {
             ssp -= 4;
-            stl(ssp, env->segs[R_CS].selector);
+            stl_kernel(ssp, env->segs[R_CS].selector);
             ssp -= 4;
-            stl(ssp, next_eip);
+            stl_kernel(ssp, next_eip);
         } else {
             ssp -= 2;
-            stw(ssp, env->segs[R_CS].selector);
+            stw_kernel(ssp, env->segs[R_CS].selector);
             ssp -= 2;
-            stw(ssp, next_eip);
+            stw_kernel(ssp, next_eip);
         }
 
         sp -= push_size;
@@ ... @@
     ssp = env->segs[R_SS].base + sp;
     if (shift == 1) {
         /* 32 bits */
-        new_eflags = ldl(ssp + 8);
-        new_cs = ldl(ssp + 4) & 0xffff;
-        new_eip = ldl(ssp) & 0xffff;
+        new_eflags = ldl_kernel(ssp + 8);
+        new_cs = ldl_kernel(ssp + 4) & 0xffff;
+        new_eip = ldl_kernel(ssp) & 0xffff;
     } else {
         /* 16 bits */
-        new_eflags = lduw(ssp + 4);
-        new_cs = lduw(ssp + 2);
-        new_eip = lduw(ssp);
+        new_eflags = lduw_kernel(ssp + 4);
+        new_cs = lduw_kernel(ssp + 2);
+        new_eip = lduw_kernel(ssp);
     }
     new_esp = sp + (6 << shift);
     ESP = (ESP & 0xffff0000) |
@@ ... @@
     if (shift == 1) {
         /* 32 bits */
         if (is_iret)
-            new_eflags = ldl(ssp + 8);
-        new_cs = ldl(ssp + 4) & 0xffff;
-        new_eip = ldl(ssp);
+            new_eflags = ldl_kernel(ssp + 8);
+        new_cs = ldl_kernel(ssp + 4) & 0xffff;
+        new_eip = ldl_kernel(ssp);
         if (is_iret && (new_eflags & VM_MASK))
             goto return_to_vm86;
     } else {
         /* 16 bits */
         if (is_iret)
-            new_eflags = lduw(ssp + 4);
-        new_cs = lduw(ssp + 2);
-        new_eip = lduw(ssp);
+            new_eflags = lduw_kernel(ssp + 4);
+        new_cs = lduw_kernel(ssp + 2);
+        new_eip = lduw_kernel(ssp);
     }
     if ((new_cs & 0xfffc) == 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ ... @@
         ssp += (4 << shift) + ((2 * is_iret) << shift) + addend;
         if (shift == 1) {
             /* 32 bits */
-            new_esp = ldl(ssp);
-            new_ss = ldl(ssp + 4) & 0xffff;
+            new_esp = ldl_kernel(ssp);
+            new_ss = ldl_kernel(ssp + 4) & 0xffff;
         } else {
             /* 16 bits */
-            new_esp = lduw(ssp);
-            new_ss = lduw(ssp + 2);
+            new_esp = lduw_kernel(ssp);
+            new_ss = lduw_kernel(ssp + 2);
         }
 
         if ((new_ss & 3) != rpl)
@@ ... @@
     return;
 
  return_to_vm86:
-    new_esp = ldl(ssp + 12);
-    new_ss = ldl(ssp + 16);
-    new_es = ldl(ssp + 20);
-    new_ds = ldl(ssp + 24);
-    new_fs = ldl(ssp + 28);
-    new_gs = ldl(ssp + 32);
+    new_esp = ldl_kernel(ssp + 12);
+    new_ss = ldl_kernel(ssp + 16);
+    new_es = ldl_kernel(ssp + 20);
+    new_ds = ldl_kernel(ssp + 24);
+    new_fs = ldl_kernel(ssp + 28);
+    new_gs = ldl_kernel(ssp + 32);
 
     /* modify processor state */
     load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
@@ ... @@
     }
 }
 
+#if !defined(CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _mmu
+#define GETPC() (__builtin_return_address(0))
+
 #define SHIFT 0
 #include "softmmu_template.h"
 
@@ ... @@
 #define SHIFT 3
 #include "softmmu_template.h"
 
-/* try to fill the TLB and return an exception if error */
-void tlb_fill(unsigned long addr, int is_write, void *retaddr)
+#endif
+
+/* try to fill the TLB and return an exception if error. If retaddr is
+   NULL, it means that the function was called in C code (i.e. not
+   from generated code or from helper.c) */
+/* XXX: fix it to restore all registers */
+void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
 {
     TranslationBlock *tb;
     int ret;
     unsigned long pc;
-    ret = cpu_x86_handle_mmu_fault(env, addr, is_write);
+    CPUX86State *saved_env;
+
+    /* XXX: hack to restore env in all cases, even if not called from
+       generated code */
+    saved_env = env;
+    env = cpu_single_env;
+    if (is_write && page_unprotect(addr)) {
+        /* nothing more to do: the page was write protected because
+           there was code in it. page_unprotect() flushed the code. */
+    }
+
+    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
     if (ret) {
-        /* now we have a real cpu fault */
-        pc = (unsigned long)retaddr;
-        tb = tb_find_pc(pc);
-        if (tb) {
-            /* the PC is inside the translated code. It means that we have
-               a virtual CPU fault */
-            cpu_restore_state(tb, env, pc);
+        if (retaddr) {
+            /* now we have a real cpu fault */
+            pc = (unsigned long)retaddr;
+            tb = tb_find_pc(pc);
+            if (tb) {
+                /* the PC is inside the translated code. It means that we have
+                   a virtual CPU fault */
+                cpu_restore_state(tb, env, pc);
+            }
         }
         raise_exception_err(EXCP0E_PAGE, env->error_code);
     }
+    env = saved_env;
 }
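`GETPC()` is what lets the out-of-line MMU helpers locate the translated block they were called from: here it is defined as `__builtin_return_address(0)`, while the `_cmmu` instantiation in exec.c defines it as NULL so the same template serves calls from plain C code. A minimal standalone demo of the capture itself follows; it uses only the GCC/clang builtin, and the surrounding function names are invented.

```c
#include <stdio.h>

#define GETPC() (__builtin_return_address(0))

static void *captured_pc;

/* noinline so the builtin really sees the caller's return address */
__attribute__((noinline)) static void slow_path_helper(void)
{
    captured_pc = GETPC();   /* an address inside the calling function */
}

int main(void)
{
    slow_path_helper();
    printf("caller pc: %p (lies inside main)\n", captured_pc);
    return 0;
}
```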
b/target-i386/helper2.c

     flags = page_get_flags(addr);
     if (flags & PAGE_VALID) {
         virt_addr = addr & ~0xfff;
+#if !defined(CONFIG_SOFTMMU)
         munmap((void *)virt_addr, 4096);
+#endif
         page_set_flags(virt_addr, virt_addr + 4096, 0);
     }
 }
@@ ... @@
    1  = generate PF fault
    2  = soft MMU activation required for this block
 */
-int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
+int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr,
+                             int is_write, int is_user, int is_softmmu)
 {
     uint8_t *pde_ptr, *pte_ptr;
     uint32_t pde, pte, virt_addr;
-    int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
+    int error_code, is_dirty, prot, page_size, ret;
     unsigned long pd;
 
-    cpl = env->hflags & HF_CPL_MASK;
-    is_user = (cpl == 3);
-
 #ifdef DEBUG_MMU
     printf("MMU fault: addr=0x%08x w=%d u=%d eip=%08x\n",
            addr, is_write, is_user, env->eip);
@@ ... @@
 
     /* page directory entry */
     pde_ptr = phys_ram_base + ((env->cr[3] & ~0xfff) + ((addr >> 20) & ~3));
-    pde = ldl(pde_ptr);
+    pde = ldl_raw(pde_ptr);
     if (!(pde & PG_PRESENT_MASK)) {
         error_code = 0;
         goto do_fault;
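The page-directory lookup above indexes the PDE by byte offset: `(addr >> 20) & ~3` is the same as `(addr >> 22) * 4`, i.e. four bytes per entry selected by the top ten bits of the linear address. A standalone worked example with invented values:

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t cr3  = 0x00200000;              /* page-directory base (4K aligned) */
    uint32_t addr = 0xbfff1234;              /* faulting linear address */

    uint32_t pde_offset = (addr >> 20) & ~3u;
    assert(pde_offset == (addr >> 22) * 4);  /* same computation, spelled differently */

    uint32_t pde_paddr = (cr3 & ~0xfffu) + pde_offset;
    printf("PDE at physical 0x%08x\n", pde_paddr);
    return 0;
}
```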