/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "cpu-all.h"
#include "memory_mapping.h"
/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, hwaddr pte_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint64_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pte_addr = (pte_start_addr + i * 8) & a20_mask;
        pte = ldq_phys(pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x1fff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}
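
/* Same as walk_pte(), but for 32-bit page tables: 1024 four-byte entries. */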
/* 32-bit Paging */
static void walk_pte2(MemoryMappingList *list,
                      hwaddr pte_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint32_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pte_addr = (pte_start_addr + i * 4) & a20_mask;
        pte = ldl_phys(pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = pte & ~0xfff;
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}
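
/*
 * Page directory walk: a PDE with the PS bit set maps a 2 MB page
 * directly; otherwise it points to a page table that walk_pte() scans.
 */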
/* PAE Paging or IA-32e Paging */
static void walk_pde(MemoryMappingList *list, hwaddr pde_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pde_addr, pte_start_addr, start_paddr;
    uint64_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pde_addr = (pde_start_addr + i * 8) & a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ff) << 21);
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 21);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte(list, pte_start_addr, a20_mask, line_addr);
    }
}
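
/*
 * 32-bit page directory walk: when CR4.PSE is set, a PDE with the PS bit
 * maps a 4 MB page and the extra PSE-36 physical address bits are taken
 * from bits 13-20 of the PDE; otherwise walk_pte2() scans the referenced
 * page table.
 */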
/* 32-bit Paging */
static void walk_pde2(MemoryMappingList *list,
                      hwaddr pde_start_addr, int32_t a20_mask,
                      bool pse)
{
    hwaddr pde_addr, pte_start_addr, start_paddr;
    uint32_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pde_addr = (pde_start_addr + i * 4) & a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3ff) << 22);
        if ((pde & PG_PSE_MASK) && pse) {
            /* 4 MB page */
            start_paddr = (pde & ~0x3fffff) | ((pde & 0x1fe000) << 19);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 22);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte2(list, pte_start_addr, a20_mask, line_addr);
    }
}
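
/*
 * PAE uses a 4-entry page directory pointer table; each entry covers
 * 1 GB of the linear address space.
 */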
/* PAE Paging */
static void walk_pdpe2(MemoryMappingList *list,
                       hwaddr pdpe_start_addr, int32_t a20_mask)
{
    hwaddr pdpe_addr, pde_start_addr;
    uint64_t pdpe;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 4; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3) << 30);
        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, pde_start_addr, a20_mask, line_addr);
    }
}

#ifdef TARGET_X86_64
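
/*
 * Third level of the IA-32e walk: a PDPTE with the PS bit set maps a
 * 1 GB page; otherwise walk_pde() scans the referenced page directory.
 */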
/* IA-32e Paging */
static void walk_pdpe(MemoryMappingList *list,
                      hwaddr pdpe_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pdpe_addr, pde_start_addr, start_paddr;
    uint64_t pdpe;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
        if (pdpe & PG_PSE_MASK) {
            /* 1 GB page */
            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 30);
            continue;
        }

        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, pde_start_addr, a20_mask, line_addr);
    }
}
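
/*
 * Top level of the IA-32e walk: each PML4 entry covers a 512 GB slice of
 * the linear address space.  The upper 16 bits of every generated linear
 * address are set, so the recorded addresses are in upper-half canonical
 * form.
 */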
/* IA-32e Paging */
static void walk_pml4e(MemoryMappingList *list,
                       hwaddr pml4e_start_addr, int32_t a20_mask)
{
    hwaddr pml4e_addr, pdpe_start_addr;
    uint64_t pml4e;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 512; i++) {
        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
        pml4e = ldq_phys(pml4e_addr);
        if (!(pml4e & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
        pdpe_start_addr = (pml4e & ~0xfff) & a20_mask;
        walk_pdpe(list, pdpe_start_addr, a20_mask, line_addr);
    }
}
#endif
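
/*
 * Entry point: choose the paging mode from CR4.PAE, the long-mode flag
 * and CR4.PSE, then start the walk at the table CR3 points to.  With
 * paging disabled there is no virtual-to-physical mapping to report, so
 * the list is left untouched.
 */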
int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env)
{
    if (!cpu_paging_enabled(env)) {
        /* paging is disabled */
        return 0;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            hwaddr pml4e_addr;

            pml4e_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
            walk_pml4e(list, pml4e_addr, env->a20_mask);
        } else
#endif
        {
            hwaddr pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
            walk_pdpe2(list, pdpe_addr, env->a20_mask);
        }
    } else {
        hwaddr pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, pde_addr, env->a20_mask, pse);
    }

    return 0;
}
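
/* Paging is enabled when the PG bit of CR0 is set. */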
bool cpu_paging_enabled(CPUArchState *env)
{
    return env->cr[0] & CR0_PG_MASK;
}