Revision cf129f3a
File: b/linux-user/elfload.c (side-by-side diff, old | new line numbers)
1034 | 1034 |
return p; |
1035 | 1035 |
} |
1036 | 1036 |
|
1037 |
/*
 * Map anonymous zero pages over [start, end) so the guest's brk/bss
 * region is backed by memory.  Both bounds are rounded up to the host
 * page size; an empty (or inverted) range is a no-op.  Aborts the
 * emulator if the mapping cannot be established.
 * NOTE(review): this diff removes set_brk() in favour of zero_bss().
 */
static void set_brk(abi_ulong start, abi_ulong end)
{
    abi_ulong aligned_start = HOST_PAGE_ALIGN(start);
    abi_ulong aligned_end = HOST_PAGE_ALIGN(end);

    if (aligned_end <= aligned_start) {
        return;
    }

    if (target_mmap(aligned_start, aligned_end - aligned_start,
                    PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
|
1051 |
|
|
1052 |
|
|
1053 |
/* We need to explicitly zero any fractional pages after the data |
|
1054 |
section (i.e. bss). This would contain the junk from the file that |
|
1055 |
should not be in memory. */ |
|
1056 |
static void padzero(abi_ulong elf_bss, abi_ulong last_bss) |
|
1057 |
{ |
|
1058 |
abi_ulong nbyte; |
|
1059 |
|
|
1060 |
if (elf_bss >= last_bss) |
|
1061 |
return; |
|
1062 |
|
|
1063 |
/* XXX: this is really a hack : if the real host page size is |
|
1064 |
smaller than the target page size, some pages after the end |
|
1065 |
of the file may not be mapped. A better fix would be to |
|
1066 |
patch target_mmap(), but it is more complicated as the file |
|
1067 |
size must be known */ |
|
1068 |
if (qemu_real_host_page_size < qemu_host_page_size) { |
|
1069 |
abi_ulong end_addr, end_addr1; |
|
1070 |
end_addr1 = (elf_bss + qemu_real_host_page_size - 1) & |
|
1071 |
~(qemu_real_host_page_size - 1); |
|
1072 |
end_addr = HOST_PAGE_ALIGN(elf_bss); |
|
1073 |
if (end_addr1 < end_addr) { |
|
1074 |
mmap((void *)g2h(end_addr1), end_addr - end_addr1, |
|
1075 |
PROT_READ|PROT_WRITE|PROT_EXEC, |
|
1076 |
MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
|
1077 |
} |
|
1037 |
/* Map and zero the bss. We need to explicitly zero any fractional pages |
|
1038 |
after the data section (i.e. bss). */ |
|
1039 |
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot) |
|
1040 |
{ |
|
1041 |
uintptr_t host_start, host_map_start, host_end; |
|
1042 |
|
|
1043 |
last_bss = TARGET_PAGE_ALIGN(last_bss); |
|
1044 |
|
|
1045 |
/* ??? There is confusion between qemu_real_host_page_size and |
|
1046 |
qemu_host_page_size here and elsewhere in target_mmap, which |
|
1047 |
may lead to the end of the data section mapping from the file |
|
1048 |
not being mapped. At least there was an explicit test and |
|
1049 |
comment for that here, suggesting that "the file size must |
|
1050 |
be known". The comment probably pre-dates the introduction |
|
1051 |
of the fstat system call in target_mmap which does in fact |
|
1052 |
find out the size. What isn't clear is if the workaround |
|
1053 |
here is still actually needed. For now, continue with it, |
|
1054 |
but merge it with the "normal" mmap that would allocate the bss. */ |
|
1055 |
|
|
1056 |
host_start = (uintptr_t) g2h(elf_bss); |
|
1057 |
host_end = (uintptr_t) g2h(last_bss); |
|
1058 |
host_map_start = (host_start + qemu_real_host_page_size - 1); |
|
1059 |
host_map_start &= -qemu_real_host_page_size; |
|
1060 |
|
|
1061 |
if (host_map_start < host_end) { |
|
1062 |
void *p = mmap((void *)host_map_start, host_end - host_map_start, |
|
1063 |
prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
|
1064 |
if (p == MAP_FAILED) { |
|
1065 |
perror("cannot mmap brk"); |
|
1066 |
exit(-1); |
|
1078 | 1067 |
} |
1079 | 1068 |
|
1080 |
nbyte = elf_bss & (qemu_host_page_size-1); |
|
1081 |
if (nbyte) { |
|
1082 |
nbyte = qemu_host_page_size - nbyte; |
|
1083 |
do { |
|
1084 |
/* FIXME - what to do if put_user() fails? */ |
|
1085 |
put_user_u8(0, elf_bss); |
|
1086 |
elf_bss++; |
|
1087 |
} while (--nbyte); |
|
1088 |
} |
|
1089 |
} |
|
1069 |
/* Since we didn't use target_mmap, make sure to record |
|
1070 |
the validity of the pages with qemu. */ |
|
1071 |
page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID); |
|
1072 |
} |
|
1090 | 1073 |
|
1074 |
if (host_start < host_map_start) { |
|
1075 |
memset((void *)host_start, 0, host_map_start - host_start); |
|
1076 |
} |
|
1077 |
} |
|
1091 | 1078 |
|
1092 | 1079 |
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, |
1093 | 1080 |
struct elfhdr * exec, |
... | ... | |
1179 | 1166 |
abi_ulong load_addr = 0; |
1180 | 1167 |
int load_addr_set = 0; |
1181 | 1168 |
int retval; |
1182 |
abi_ulong last_bss, elf_bss; |
|
1183 | 1169 |
abi_ulong error; |
1184 | 1170 |
int i; |
1185 | 1171 |
|
1186 |
elf_bss = 0; |
|
1187 |
last_bss = 0; |
|
1188 | 1172 |
error = 0; |
1189 | 1173 |
|
1190 | 1174 |
#ifdef BSWAP_NEEDED |
... | ... | |
1257 | 1241 |
int elf_type = MAP_PRIVATE | MAP_DENYWRITE; |
1258 | 1242 |
int elf_prot = 0; |
1259 | 1243 |
abi_ulong vaddr = 0; |
1260 |
abi_ulong k; |
|
1261 | 1244 |
|
1262 | 1245 |
if (eppnt->p_flags & PF_R) elf_prot = PROT_READ; |
1263 | 1246 |
if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE; |
... | ... | |
1285 | 1268 |
load_addr_set = 1; |
1286 | 1269 |
} |
1287 | 1270 |
|
1288 |
/* |
|
1289 |
* Find the end of the file mapping for this phdr, and keep |
|
1290 |
* track of the largest address we see for this. |
|
1291 |
*/ |
|
1292 |
k = load_addr + eppnt->p_vaddr + eppnt->p_filesz; |
|
1293 |
if (k > elf_bss) elf_bss = k; |
|
1294 |
|
|
1295 |
/* |
|
1296 |
* Do the same thing for the memory mapping - between |
|
1297 |
* elf_bss and last_bss is the bss section. |
|
1298 |
*/ |
|
1299 |
k = load_addr + eppnt->p_memsz + eppnt->p_vaddr; |
|
1300 |
if (k > last_bss) last_bss = k; |
|
1271 |
/* If the load segment requests extra zeros (e.g. bss), map it. */ |
|
1272 |
if (eppnt->p_filesz < eppnt->p_memsz) { |
|
1273 |
abi_ulong base = load_addr + eppnt->p_vaddr; |
|
1274 |
zero_bss(base + eppnt->p_filesz, |
|
1275 |
base + eppnt->p_memsz, elf_prot); |
|
1276 |
} |
|
1301 | 1277 |
} |
1302 | 1278 |
|
1303 | 1279 |
/* Now use mmap to map the library into memory. */ |
1304 | 1280 |
|
1305 | 1281 |
close(interpreter_fd); |
1306 |
|
|
1307 |
/* |
|
1308 |
* Now fill out the bss section. First pad the last page up |
|
1309 |
* to the page boundary, and then perform a mmap to make sure |
|
1310 |
* that there are zeromapped pages up to and including the last |
|
1311 |
* bss page. |
|
1312 |
*/ |
|
1313 |
padzero(elf_bss, last_bss); |
|
1314 |
elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */ |
|
1315 |
|
|
1316 |
/* Map the last of the bss segment */ |
|
1317 |
if (last_bss > elf_bss) { |
|
1318 |
target_mmap(elf_bss, last_bss-elf_bss, |
|
1319 |
PROT_READ|PROT_WRITE|PROT_EXEC, |
|
1320 |
MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); |
|
1321 |
} |
|
1322 | 1282 |
free(elf_phdata); |
1323 | 1283 |
|
1324 | 1284 |
*interp_load_addr = load_addr; |
... | ... | |
1472 | 1432 |
abi_ulong mapped_addr; |
1473 | 1433 |
struct elf_phdr * elf_ppnt; |
1474 | 1434 |
struct elf_phdr *elf_phdata; |
1475 |
abi_ulong elf_bss, k, elf_brk;
|
|
1435 |
abi_ulong k, elf_brk; |
|
1476 | 1436 |
int retval; |
1477 | 1437 |
char * elf_interpreter; |
1478 | 1438 |
abi_ulong elf_entry, interp_load_addr = 0; |
... | ... | |
1531 | 1491 |
#endif |
1532 | 1492 |
elf_ppnt = elf_phdata; |
1533 | 1493 |
|
1534 |
elf_bss = 0; |
|
1535 | 1494 |
elf_brk = 0; |
1536 | 1495 |
|
1537 |
|
|
1538 | 1496 |
elf_stack = ~((abi_ulong)0UL); |
1539 | 1497 |
elf_interpreter = NULL; |
1540 | 1498 |
start_code = ~((abi_ulong)0UL); |
... | ... | |
1838 | 1796 |
if (start_data < k) |
1839 | 1797 |
start_data = k; |
1840 | 1798 |
k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz; |
1841 |
if (k > elf_bss) |
|
1842 |
elf_bss = k; |
|
1843 | 1799 |
if ((elf_ppnt->p_flags & PF_X) && end_code < k) |
1844 | 1800 |
end_code = k; |
1845 | 1801 |
if (end_data < k) |
1846 | 1802 |
end_data = k; |
1847 | 1803 |
k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz; |
1848 |
if (k > elf_brk) elf_brk = k; |
|
1804 |
if (k > elf_brk) { |
|
1805 |
elf_brk = TARGET_PAGE_ALIGN(k); |
|
1806 |
} |
|
1807 |
|
|
1808 |
/* If the load segment requests extra zeros (e.g. bss), map it. */ |
|
1809 |
if (elf_ppnt->p_filesz < elf_ppnt->p_memsz) { |
|
1810 |
abi_ulong base = load_bias + elf_ppnt->p_vaddr; |
|
1811 |
zero_bss(base + elf_ppnt->p_filesz, |
|
1812 |
base + elf_ppnt->p_memsz, elf_prot); |
|
1813 |
} |
|
1849 | 1814 |
} |
1850 | 1815 |
|
1851 | 1816 |
elf_entry += load_bias; |
1852 |
elf_bss += load_bias; |
|
1853 | 1817 |
elf_brk += load_bias; |
1854 | 1818 |
start_code += load_bias; |
1855 | 1819 |
end_code += load_bias; |
... | ... | |
1904 | 1868 |
info->end_data = end_data; |
1905 | 1869 |
info->start_stack = bprm->p; |
1906 | 1870 |
|
1907 |
/* Calling set_brk effectively mmaps the pages that we need for the bss and break |
|
1908 |
sections */ |
|
1909 |
set_brk(elf_bss, elf_brk); |
|
1910 |
|
|
1911 |
padzero(elf_bss, elf_brk); |
|
1912 |
|
|
1913 | 1871 |
#if 0 |
1914 | 1872 |
printf("(start_brk) %x\n" , info->start_brk); |
1915 | 1873 |
printf("(end_code) %x\n" , info->end_code); |
Also available in: Unified diff