root / linux-user / elfload.c @ 2f619698
History | View | Annotate | Download (40.6 kB)
1 |
/* This is the Linux kernel elf-loading code, ported into user space */
|
---|---|
2 |
|
3 |
#include <stdio.h> |
4 |
#include <sys/types.h> |
5 |
#include <fcntl.h> |
6 |
#include <errno.h> |
7 |
#include <unistd.h> |
8 |
#include <sys/mman.h> |
9 |
#include <stdlib.h> |
10 |
#include <string.h> |
11 |
|
12 |
#include "qemu.h" |
13 |
#include "disas.h" |
14 |
|
15 |
/* from personality.h */
|
16 |
|
17 |
/*
|
18 |
* Flags for bug emulation.
|
19 |
*
|
20 |
* These occupy the top three bytes.
|
21 |
*/
|
22 |
/* Bug-emulation personality flags (top three bytes of the personality
   word), mirrored from the kernel's <linux/personality.h>. */
enum {
    ADDR_NO_RANDOMIZE =  0x0040000,      /* disable randomization of VA space */
    FDPIC_FUNCPTRS =     0x0080000,      /* userspace function ptrs point to
                                            descriptors (signal handling) */
    MMAP_PAGE_ZERO =     0x0100000,      /* allow mapping page 0 (SVr4 ABIs) */
    ADDR_COMPAT_LAYOUT = 0x0200000,      /* legacy virtual address layout */
    READ_IMPLIES_EXEC =  0x0400000,      /* PROT_READ implies PROT_EXEC */
    ADDR_LIMIT_32BIT =   0x0800000,      /* limit address space to 32 bits */
    SHORT_INODE =        0x1000000,      /* emulate 16-bit inode numbers */
    WHOLE_SECONDS =      0x2000000,      /* round times to whole seconds */
    STICKY_TIMEOUTS =    0x4000000,      /* don't shorten select() timeouts */
    ADDR_LIMIT_3GB =     0x8000000,      /* limit address space to 3GB */
};
36 |
|
37 |
/*
|
38 |
* Personality types.
|
39 |
*
|
40 |
* These go in the low byte. Avoid using the top bit, it will
|
41 |
* conflict with error returns.
|
42 |
*/
|
43 |
/* Personality types (low byte of the personality word), combined with
   the bug-emulation flags above.  Mirrored from <linux/personality.h>. */
enum {
    PER_LINUX =          0x0000,
    PER_LINUX_32BIT =    0x0000 | ADDR_LIMIT_32BIT,
    PER_LINUX_FDPIC =    0x0000 | FDPIC_FUNCPTRS,
    PER_SVR4 =           0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_SVR3 =           0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_SCOSVR3 =        0x0003 | STICKY_TIMEOUTS |
                                  WHOLE_SECONDS | SHORT_INODE,
    PER_OSR5 =           0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
    PER_WYSEV386 =       0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_ISCR4 =          0x0005 | STICKY_TIMEOUTS,
    PER_BSD =            0x0006,
    PER_SUNOS =          0x0006 | STICKY_TIMEOUTS,
    PER_XENIX =          0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
    PER_LINUX32 =        0x0008,
    PER_LINUX32_3GB =    0x0008 | ADDR_LIMIT_3GB,
    PER_IRIX32 =         0x0009 | STICKY_TIMEOUTS,   /* IRIX5 32-bit */
    PER_IRIXN32 =        0x000a | STICKY_TIMEOUTS,   /* IRIX6 new 32-bit */
    PER_IRIX64 =         0x000b | STICKY_TIMEOUTS,   /* IRIX6 64-bit */
    PER_RISCOS =         0x000c,
    PER_SOLARIS =        0x000d | STICKY_TIMEOUTS,
    PER_UW7 =            0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
    PER_OSF4 =           0x000f,                     /* OSF/1 v4 */
    PER_HPUX =           0x0010,
    PER_MASK =           0x00ff,
};
69 |
|
70 |
/*
|
71 |
* Return the base personality without flags.
|
72 |
*/
|
73 |
#define personality(pers) (pers & PER_MASK)
|
74 |
|
75 |
/* this flag is ineffective under Linux too, so it should be deleted */
|
76 |
#ifndef MAP_DENYWRITE
|
77 |
#define MAP_DENYWRITE 0 |
78 |
#endif
|
79 |
|
80 |
/* should probably go in elf.h */
|
81 |
#ifndef ELIBBAD
|
82 |
#define ELIBBAD 80 |
83 |
#endif
|
84 |
|
85 |
#ifdef TARGET_I386
|
86 |
|
87 |
#define ELF_PLATFORM get_elf_platform()
|
88 |
|
89 |
/* Build the AT_PLATFORM string ("i386".."i686") from the emulated CPU's
   family, mimicking what the Linux kernel reports for x86. */
static const char *get_elf_platform(void)
{
    static char elf_platform[] = "i386";
    /* CPUID version EAX bits [11:8] hold the processor family. */
    int family = (global_env->cpuid_version >> 8) & 0xff;
    if (family > 6)
        family = 6;             /* clamp to "i686" for newer families */
    if (family >= 3)
        elf_platform[1] = '0' + family;   /* patch "i386" -> "iN86" in place */
    return elf_platform;
}
99 |
|
100 |
#define ELF_HWCAP get_elf_hwcap()
|
101 |
|
102 |
/* AT_HWCAP for x86: expose the emulated CPU's CPUID feature bits. */
static uint32_t get_elf_hwcap(void)
{
    return global_env->cpuid_features;
}
106 |
|
107 |
#ifdef TARGET_X86_64
|
108 |
#define ELF_START_MMAP 0x2aaaaab000ULL |
109 |
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )
|
110 |
|
111 |
#define ELF_CLASS ELFCLASS64
|
112 |
#define ELF_DATA ELFDATA2LSB
|
113 |
#define ELF_ARCH EM_X86_64
|
114 |
|
115 |
/* Initial user register state for an x86-64 process: entry point in
   %rip, initial stack pointer in %rsp, %rax cleared. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->rax = 0;
    regs->rsp = infop->start_stack;
    regs->rip = infop->entry;
}
121 |
|
122 |
#else
|
123 |
|
124 |
#define ELF_START_MMAP 0x80000000 |
125 |
|
126 |
/*
|
127 |
* This is used to ensure we don't load something for the wrong architecture.
|
128 |
*/
|
129 |
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
|
130 |
|
131 |
/*
|
132 |
* These are used to set parameters in the core dumps.
|
133 |
*/
|
134 |
#define ELF_CLASS ELFCLASS32
|
135 |
#define ELF_DATA ELFDATA2LSB
|
136 |
#define ELF_ARCH EM_386
|
137 |
|
138 |
/* Initial user register state for an i386 process. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->esp = infop->start_stack;
    regs->eip = infop->entry;

    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
       starts %edx contains a pointer to a function which might be
       registered using `atexit'.  This provides a mean for the
       dynamic linker to call DT_FINI functions for shared libraries
       that have been loaded before the code runs.

       A value of 0 tells we have no such handler. */
    regs->edx = 0;
}
152 |
#endif
|
153 |
|
154 |
#define USE_ELF_CORE_DUMP
|
155 |
#define ELF_EXEC_PAGESIZE 4096 |
156 |
|
157 |
#endif
|
158 |
|
159 |
#ifdef TARGET_ARM
|
160 |
|
161 |
#define ELF_START_MMAP 0x80000000 |
162 |
|
163 |
#define elf_check_arch(x) ( (x) == EM_ARM )
|
164 |
|
165 |
#define ELF_CLASS ELFCLASS32
|
166 |
#ifdef TARGET_WORDS_BIGENDIAN
|
167 |
#define ELF_DATA ELFDATA2MSB
|
168 |
#else
|
169 |
#define ELF_DATA ELFDATA2LSB
|
170 |
#endif
|
171 |
#define ELF_ARCH EM_ARM
|
172 |
|
173 |
/* Initial user register state for an ARM process.  Bit 0 of the entry
   address selects Thumb mode, per the ARM EABI convention. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    abi_long stack = infop->start_stack;
    memset(regs, 0, sizeof(*regs));
    regs->ARM_cpsr = 0x10;                     /* user mode */
    if (infop->entry & 1)
        regs->ARM_cpsr |= CPSR_T;              /* odd entry => Thumb code */
    regs->ARM_pc = infop->entry & 0xfffffffe;  /* clear the Thumb bit */
    regs->ARM_sp = infop->start_stack;
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
    get_user_ual(regs->ARM_r1, stack + 4); /* argv (original comment said envp) */
    /* XXX: it seems that r0 is zeroed after ! */
    regs->ARM_r0 = 0;
    /* For uClinux PIC binaries.  */
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
    regs->ARM_r10 = infop->start_data;
}
191 |
|
192 |
#define USE_ELF_CORE_DUMP
|
193 |
#define ELF_EXEC_PAGESIZE 4096 |
194 |
|
195 |
/* ARM AT_HWCAP bits, matching the kernel's asm/hwcap.h values. */
enum
{
    ARM_HWCAP_ARM_SWP       = 1 << 0,   /* SWP instruction */
    ARM_HWCAP_ARM_HALF      = 1 << 1,   /* halfword load/store */
    ARM_HWCAP_ARM_THUMB     = 1 << 2,   /* Thumb instruction set */
    ARM_HWCAP_ARM_26BIT     = 1 << 3,   /* legacy 26-bit addressing */
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,   /* long multiply */
    ARM_HWCAP_ARM_FPA       = 1 << 5,   /* FPA floating point */
    ARM_HWCAP_ARM_VFP       = 1 << 6,   /* VFP floating point */
    ARM_HWCAP_ARM_EDSP      = 1 << 7,   /* DSP extensions */
};
206 |
|
207 |
#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF \
|
208 |
| ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT \ |
209 |
| ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP) |
210 |
|
211 |
#endif
|
212 |
|
213 |
#ifdef TARGET_SPARC
|
214 |
#ifdef TARGET_SPARC64
|
215 |
|
216 |
#define ELF_START_MMAP 0x80000000 |
217 |
|
218 |
#ifndef TARGET_ABI32
|
219 |
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
|
220 |
#else
|
221 |
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
|
222 |
#endif
|
223 |
|
224 |
#define ELF_CLASS ELFCLASS64
|
225 |
#define ELF_DATA ELFDATA2MSB
|
226 |
#define ELF_ARCH EM_SPARCV9
|
227 |
|
228 |
#define STACK_BIAS 2047 |
229 |
|
230 |
/* Initial user register state for a SPARC64 process.  u_regs[14] is %sp;
   the stack pointer is dropped below a 16-slot register save area, and
   64-bit processes additionally subtract the SPARC V9 stack bias. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
#ifndef TARGET_ABI32
    regs->tstate = 0;
#endif
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;      /* delay-slot architecture: next PC */
    regs->y = 0;
#ifdef TARGET_ABI32
    regs->u_regs[14] = infop->start_stack - 16 * 4;
#else
    if (personality(infop->personality) == PER_LINUX32)
        regs->u_regs[14] = infop->start_stack - 16 * 4;       /* 32-bit slots */
    else
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
#endif
}
247 |
|
248 |
#else
|
249 |
#define ELF_START_MMAP 0x80000000 |
250 |
|
251 |
#define elf_check_arch(x) ( (x) == EM_SPARC )
|
252 |
|
253 |
#define ELF_CLASS ELFCLASS32
|
254 |
#define ELF_DATA ELFDATA2MSB
|
255 |
#define ELF_ARCH EM_SPARC
|
256 |
|
257 |
/* Initial user register state for a 32-bit SPARC process.  u_regs[14] is
   %sp, placed below a 16-word register save area. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->psr = 0;
    regs->pc = infop->entry;
    regs->npc = regs->pc + 4;      /* delay-slot architecture: next PC */
    regs->y = 0;
    regs->u_regs[14] = infop->start_stack - 16 * 4;
}
265 |
|
266 |
#endif
|
267 |
#endif
|
268 |
|
269 |
#ifdef TARGET_PPC
|
270 |
|
271 |
#define ELF_START_MMAP 0x80000000 |
272 |
|
273 |
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
|
274 |
|
275 |
#define elf_check_arch(x) ( (x) == EM_PPC64 )
|
276 |
|
277 |
#define ELF_CLASS ELFCLASS64
|
278 |
|
279 |
#else
|
280 |
|
281 |
#define elf_check_arch(x) ( (x) == EM_PPC )
|
282 |
|
283 |
#define ELF_CLASS ELFCLASS32
|
284 |
|
285 |
#endif
|
286 |
|
287 |
#ifdef TARGET_WORDS_BIGENDIAN
|
288 |
#define ELF_DATA ELFDATA2MSB
|
289 |
#else
|
290 |
#define ELF_DATA ELFDATA2LSB
|
291 |
#endif
|
292 |
#define ELF_ARCH EM_PPC
|
293 |
|
294 |
/*
|
295 |
* We need to put in some extra aux table entries to tell glibc what
|
296 |
* the cache block size is, so it can use the dcbz instruction safely.
|
297 |
*/
|
298 |
#define AT_DCACHEBSIZE 19 |
299 |
#define AT_ICACHEBSIZE 20 |
300 |
#define AT_UCACHEBSIZE 21 |
301 |
/* A special ignored type value for PPC, for glibc compatibility. */
|
302 |
#define AT_IGNOREPPC 22 |
303 |
/*
|
304 |
* The requirements here are:
|
305 |
* - keep the final alignment of sp (sp & 0xf)
|
306 |
* - make sure the 32-bit value at the first 16 byte aligned position of
|
307 |
* AUXV is greater than 16 for glibc compatibility.
|
308 |
* AT_IGNOREPPC is used for that.
|
309 |
* - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
|
310 |
* even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
|
311 |
*/
|
312 |
#define DLINFO_ARCH_ITEMS 5 |
313 |
#define ARCH_DLINFO \
|
314 |
do { \
|
315 |
NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20); \
|
316 |
NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20); \
|
317 |
NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \
|
318 |
/* \
|
319 |
* Now handle glibc compatibility. \
|
320 |
*/ \
|
321 |
NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ |
322 |
NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ |
323 |
} while (0) |
324 |
|
325 |
/* Initial user register state for a PPC process.  Per the PPC ABI,
   r1 = stack pointer, r3 = argc, r4 = argv, r5 = envp, and nip = entry.
   On 64-bit PPC the entry symbol is a function descriptor: the real
   entry address and the TOC pointer (r2) are loaded from it. */
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
{
    abi_ulong pos = infop->start_stack;
    abi_ulong tmp;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    abi_ulong entry, toc;
#endif

    _regs->gpr[1] = infop->start_stack;
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
    /* Dereference the function descriptor: [entry, toc], both relocated. */
    entry = ldq_raw(infop->entry) + infop->load_addr;
    toc = ldq_raw(infop->entry + 8) + infop->load_addr;
    _regs->gpr[2] = toc;
    infop->entry = entry;
#endif
    _regs->nip = infop->entry;
    /* Note that isn't exactly what regular kernel does
     * but this is what the ABI wants and is needed to allow
     * execution of PPC BSD programs.
     */
    /* FIXME - what to do for failure of get_user()? */
    get_user_ual(_regs->gpr[3], pos);          /* r3 = argc (from stack top) */
    pos += sizeof(abi_ulong);
    _regs->gpr[4] = pos;                       /* r4 = argv */
    /* Scan past the argv vector (terminated by a NULL pointer)... */
    for (tmp = 1; tmp != 0; pos += sizeof(abi_ulong))
        tmp = ldl(pos);
    _regs->gpr[5] = pos;                       /* ...r5 = envp follows it */
}
353 |
|
354 |
#define USE_ELF_CORE_DUMP
|
355 |
#define ELF_EXEC_PAGESIZE 4096 |
356 |
|
357 |
#endif
|
358 |
|
359 |
#ifdef TARGET_MIPS
|
360 |
|
361 |
#define ELF_START_MMAP 0x80000000 |
362 |
|
363 |
#define elf_check_arch(x) ( (x) == EM_MIPS )
|
364 |
|
365 |
#ifdef TARGET_MIPS64
|
366 |
#define ELF_CLASS ELFCLASS64
|
367 |
#else
|
368 |
#define ELF_CLASS ELFCLASS32
|
369 |
#endif
|
370 |
#ifdef TARGET_WORDS_BIGENDIAN
|
371 |
#define ELF_DATA ELFDATA2MSB
|
372 |
#else
|
373 |
#define ELF_DATA ELFDATA2LSB
|
374 |
#endif
|
375 |
#define ELF_ARCH EM_MIPS
|
376 |
|
377 |
/* Initial user register state for a MIPS process. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->cp0_status = 2 << CP0St_KSU;   /* CP0 status: user mode */
    regs->cp0_epc = infop->entry;        /* resume address = ELF entry */
    regs->regs[29] = infop->start_stack; /* $29 is the stack pointer ($sp) */
}
383 |
|
384 |
#define USE_ELF_CORE_DUMP
|
385 |
#define ELF_EXEC_PAGESIZE 4096 |
386 |
|
387 |
#endif /* TARGET_MIPS */ |
388 |
|
389 |
#ifdef TARGET_SH4
|
390 |
|
391 |
#define ELF_START_MMAP 0x80000000 |
392 |
|
393 |
#define elf_check_arch(x) ( (x) == EM_SH )
|
394 |
|
395 |
#define ELF_CLASS ELFCLASS32
|
396 |
#define ELF_DATA ELFDATA2LSB
|
397 |
#define ELF_ARCH EM_SH
|
398 |
|
399 |
/* Initial user register state for an SH4 process.  r15 is the stack
   pointer by convention. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    /* Check other registers XXXXX */
    regs->pc = infop->entry;
    regs->regs[15] = infop->start_stack;
}
405 |
|
406 |
#define USE_ELF_CORE_DUMP
|
407 |
#define ELF_EXEC_PAGESIZE 4096 |
408 |
|
409 |
#endif
|
410 |
|
411 |
#ifdef TARGET_CRIS
|
412 |
|
413 |
#define ELF_START_MMAP 0x80000000 |
414 |
|
415 |
#define elf_check_arch(x) ( (x) == EM_CRIS )
|
416 |
|
417 |
#define ELF_CLASS ELFCLASS32
|
418 |
#define ELF_DATA ELFDATA2LSB
|
419 |
#define ELF_ARCH EM_CRIS
|
420 |
|
421 |
/* Initial user register state for a CRIS process: only the return
   pointer (ERP) is set here; the stack pointer is handled elsewhere. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->erp = infop->entry;
}
425 |
|
426 |
#define USE_ELF_CORE_DUMP
|
427 |
#define ELF_EXEC_PAGESIZE 8192 |
428 |
|
429 |
#endif
|
430 |
|
431 |
#ifdef TARGET_M68K
|
432 |
|
433 |
#define ELF_START_MMAP 0x80000000 |
434 |
|
435 |
#define elf_check_arch(x) ( (x) == EM_68K )
|
436 |
|
437 |
#define ELF_CLASS ELFCLASS32
|
438 |
#define ELF_DATA ELFDATA2MSB
|
439 |
#define ELF_ARCH EM_68K
|
440 |
|
441 |
/* ??? Does this need to do anything?
|
442 |
#define ELF_PLAT_INIT(_r) */
|
443 |
|
444 |
/* Initial user register state for an M68K process. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->usp = infop->start_stack;   /* user stack pointer */
    regs->sr = 0;                     /* status register cleared => user mode */
    regs->pc = infop->entry;
}
450 |
|
451 |
#define USE_ELF_CORE_DUMP
|
452 |
#define ELF_EXEC_PAGESIZE 8192 |
453 |
|
454 |
#endif
|
455 |
|
456 |
#ifdef TARGET_ALPHA
|
457 |
|
458 |
#define ELF_START_MMAP (0x30000000000ULL) |
459 |
|
460 |
#define elf_check_arch(x) ( (x) == ELF_ARCH )
|
461 |
|
462 |
#define ELF_CLASS ELFCLASS64
|
463 |
#define ELF_DATA ELFDATA2MSB
|
464 |
#define ELF_ARCH EM_ALPHA
|
465 |
|
466 |
/* Initial user register state for an Alpha process. */
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    regs->pc = infop->entry;
    regs->ps = 8;                      /* processor status; 8 = user mode? — verify */
    regs->usp = infop->start_stack;
    regs->unique = infop->start_data;  /* ? (original author was unsure too) */
    /* NOTE(review): leftover debug output — should probably be removed. */
    printf("Set unique value to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
           regs->unique, infop->start_data);
}
475 |
|
476 |
#define USE_ELF_CORE_DUMP
|
477 |
#define ELF_EXEC_PAGESIZE 8192 |
478 |
|
479 |
#endif /* TARGET_ALPHA */ |
480 |
|
481 |
#ifndef ELF_PLATFORM
|
482 |
#define ELF_PLATFORM (NULL) |
483 |
#endif
|
484 |
|
485 |
#ifndef ELF_HWCAP
|
486 |
#define ELF_HWCAP 0 |
487 |
#endif
|
488 |
|
489 |
#ifdef TARGET_ABI32
|
490 |
#undef ELF_CLASS
|
491 |
#define ELF_CLASS ELFCLASS32
|
492 |
#undef bswaptls
|
493 |
#define bswaptls(ptr) bswap32s(ptr)
|
494 |
#endif
|
495 |
|
496 |
#include "elf.h" |
497 |
|
498 |
/* Classic a.out executable header, used for a.out dynamic-linker
   interpreters (see load_aout_interp / the N_MAGIC macros below). */
struct exec
{
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
    unsigned int a_text;   /* length of text, in bytes */
    unsigned int a_data;   /* length of data, in bytes */
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
    unsigned int a_entry;  /* start address */
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
};
509 |
|
510 |
|
511 |
#define N_MAGIC(exec) ((exec).a_info & 0xffff) |
512 |
#define OMAGIC 0407 |
513 |
#define NMAGIC 0410 |
514 |
#define ZMAGIC 0413 |
515 |
#define QMAGIC 0314 |
516 |
|
517 |
/* max code+data+bss space allocated to elf interpreter */
|
518 |
#define INTERP_MAP_SIZE (32 * 1024 * 1024) |
519 |
|
520 |
/* max code+data+bss+brk space allocated to ET_DYN executables */
|
521 |
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024) |
522 |
|
523 |
/* Necessary parameters */
|
524 |
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
|
525 |
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1)) |
526 |
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1)) |
527 |
|
528 |
#define INTERPRETER_NONE 0 |
529 |
#define INTERPRETER_AOUT 1 |
530 |
#define INTERPRETER_ELF 2 |
531 |
|
532 |
#define DLINFO_ITEMS 12 |
533 |
|
534 |
/* Kernel-heritage shim: in user space a "copy from user segment" is
   just a plain memory copy. */
static inline void memcpy_fromfs(void *dst, const void *src, unsigned long count)
{
    memcpy(dst, src, count);
}
538 |
|
539 |
extern unsigned long x86_stack_size; |
540 |
|
541 |
static int load_aout_interp(void * exptr, int interp_fd); |
542 |
|
543 |
#ifdef BSWAP_NEEDED
|
544 |
/* Byte-swap every multi-byte field of an ELF file header in place
   (host endianness differs from the target's). */
static void bswap_ehdr(struct elfhdr *ehdr)
{
    bswap16s(&ehdr->e_type);            /* Object file type */
    bswap16s(&ehdr->e_machine);         /* Architecture */
    bswap32s(&ehdr->e_version);         /* Object file version */
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
}
560 |
|
561 |
/* Byte-swap every field of an ELF program header in place. */
static void bswap_phdr(struct elf_phdr *phdr)
{
    bswap32s(&phdr->p_type);            /* Segment type */
    bswaptls(&phdr->p_offset);          /* Segment file offset */
    bswaptls(&phdr->p_vaddr);           /* Segment virtual address */
    bswaptls(&phdr->p_paddr);           /* Segment physical address */
    bswaptls(&phdr->p_filesz);          /* Segment size in file */
    bswaptls(&phdr->p_memsz);           /* Segment size in memory */
    bswap32s(&phdr->p_flags);           /* Segment flags */
    bswaptls(&phdr->p_align);           /* Segment alignment */
}
572 |
|
573 |
/* Byte-swap every field of an ELF section header in place. */
static void bswap_shdr(struct elf_shdr *shdr)
{
    bswap32s(&shdr->sh_name);
    bswap32s(&shdr->sh_type);
    bswaptls(&shdr->sh_flags);
    bswaptls(&shdr->sh_addr);
    bswaptls(&shdr->sh_offset);
    bswaptls(&shdr->sh_size);
    bswap32s(&shdr->sh_link);
    bswap32s(&shdr->sh_info);
    bswaptls(&shdr->sh_addralign);
    bswaptls(&shdr->sh_entsize);
}
586 |
|
587 |
/* Byte-swap the multi-byte fields of an ELF symbol table entry in place
   (st_info/st_other are single bytes and need no swap). */
static void bswap_sym(struct elf_sym *sym)
{
    bswap32s(&sym->st_name);
    bswaptls(&sym->st_value);
    bswaptls(&sym->st_size);
    bswap16s(&sym->st_shndx);
}
594 |
#endif
|
595 |
|
596 |
/*
|
597 |
* 'copy_elf_strings()' copies argument/envelope strings from user
|
598 |
* memory to free pages in kernel mem. These are in a format ready
|
599 |
* to be put directly into the top of new user memory.
|
600 |
*
|
601 |
*/
|
602 |
/*
 * 'copy_elf_strings()' copies argument/envelope strings from user
 * memory to free pages in kernel mem. These are in a format ready
 * to be put directly into the top of new user memory.
 *
 * Strings are copied top-down: p starts at the high end of the arg
 * area and is decremented as each NUL-terminated string (last arg
 * first) is written into the per-page buffers in 'page'.
 *
 * Returns the new (lower) value of p, or 0 on failure.
 */
static abi_ulong copy_elf_strings(int argc, char **argv, void **page,
                                  abi_ulong p)
{
    char *tmp, *tmp1, *pag = NULL;
    int len, offset = 0;

    if (!p) {
        return 0;       /* bullet-proofing */
    }
    while (argc-- > 0) {
        tmp = argv[argc];
        if (!tmp) {
            fprintf(stderr, "VFS: argc is wrong");
            exit(-1);
        }
        /* Find the end of the string, including the trailing NUL. */
        tmp1 = tmp;
        while (*tmp++);
        len = tmp - tmp1;
        if (p < len) {  /* this shouldn't happen - 128kB */
            return 0;
        }
        while (len) {
            --p; --tmp; --len;
            if (--offset < 0) {
                /* Crossed into the previous page: look it up, allocating
                   and zero-filling it on first touch. */
                offset = p % TARGET_PAGE_SIZE;
                pag = (char *)page[p/TARGET_PAGE_SIZE];
                if (!pag) {
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    /* BUGFIX: check the allocation BEFORE using it; the
                       original memset() NULL and stored NULL in page[]. */
                    if (!pag)
                        return 0;
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
                }
            }
            if (len == 0 || offset == 0) {
                /* Single byte left in this page: copy it directly. */
                *(pag + offset) = *tmp;
            }
            else {
                /* Bulk-copy as much of the string as fits in this page. */
                int bytes_to_copy = (len > offset) ? offset : len;
                tmp -= bytes_to_copy;
                p -= bytes_to_copy;
                offset -= bytes_to_copy;
                len -= bytes_to_copy;
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
            }
        }
    }
    return p;
}
651 |
|
652 |
/*
 * Map the guest stack and copy the prepared argument pages onto its top.
 * Returns p relocated into the new stack mapping.
 */
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
                                 struct image_info *info)
{
    abi_ulong stack_base, size, error;
    int i;

    /* Create enough stack to hold everything.  If we don't use
     * it for args, we'll use it for something else...
     */
    size = x86_stack_size;
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    /* One extra host page is mapped and later protected as a guard. */
    error = target_mmap(0,
                        size + qemu_host_page_size,
                        PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS,
                        -1, 0);
    if (error == -1) {
        perror("stk mmap");
        exit(-1);
    }
    /* we reserve one extra page at the top of the stack as guard */
    target_mprotect(error + size, qemu_host_page_size, PROT_NONE);

    /* The arg pages occupy the top MAX_ARG_PAGES pages of the stack. */
    stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
    p += stack_base;

    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
        if (bprm->page[i]) {
            info->rss++;
            /* FIXME - check return value of memcpy_to_target() for failure */
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
            free(bprm->page[i]);    /* host-side staging page no longer needed */
        }
        stack_base += TARGET_PAGE_SIZE;
    }
    return p;
}
690 |
|
691 |
/* Map anonymous zeroed memory for the [start, end) brk region,
   page-aligning both ends.  Exits the process on mmap failure. */
static void set_brk(abi_ulong start, abi_ulong end)
{
    /* page-align the start and end addresses... */
    start = HOST_PAGE_ALIGN(start);
    end = HOST_PAGE_ALIGN(end);
    if (end <= start)
        return;                 /* nothing to map after alignment */
    if(target_mmap(start, end - start,
                   PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
        perror("cannot mmap brk");
        exit(-1);
    }
}
705 |
|
706 |
|
707 |
/* We need to explicitly zero any fractional pages after the data
|
708 |
section (i.e. bss). This would contain the junk from the file that
|
709 |
should not be in memory. */
|
710 |
/* We need to explicitly zero any fractional pages after the data
   section (i.e. bss).  This would contain the junk from the file that
   should not be in memory. */
static void padzero(abi_ulong elf_bss, abi_ulong last_bss)
{
    abi_ulong nbyte;

    if (elf_bss >= last_bss)
        return;                 /* no bss region at all */

    /* XXX: this is really a hack : if the real host page size is
       smaller than the target page size, some pages after the end
       of the file may not be mapped. A better fix would be to
       patch target_mmap(), but it is more complicated as the file
       size must be known */
    if (qemu_real_host_page_size < qemu_host_page_size) {
        abi_ulong end_addr, end_addr1;
        end_addr1 = (elf_bss + qemu_real_host_page_size - 1) &
            ~(qemu_real_host_page_size - 1);
        end_addr = HOST_PAGE_ALIGN(elf_bss);
        if (end_addr1 < end_addr) {
            /* Fill the host-page gap with an anonymous zero mapping. */
            mmap((void *)g2h(end_addr1), end_addr - end_addr1,
                 PROT_READ|PROT_WRITE|PROT_EXEC,
                 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
        }
    }

    /* Zero the tail of the last partially-used page byte by byte. */
    nbyte = elf_bss & (qemu_host_page_size-1);
    if (nbyte) {
        nbyte = qemu_host_page_size - nbyte;
        do {
            /* FIXME - what to do if put_user() fails? */
            put_user_u8(0, elf_bss);
            elf_bss++;
        } while (--nbyte);
    }
}
744 |
|
745 |
|
746 |
/*
 * Build the initial process stack: the AT_PLATFORM string, the ELF
 * auxiliary vector, then (via loader_build_argptr) envp, argv and argc.
 * The stack grows downward, so entries are pushed in reverse order —
 * AT_NULL is pushed first so it ends up terminating the auxv.
 * Returns the final stack pointer.
 */
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
                                   struct elfhdr * exec,
                                   abi_ulong load_addr,
                                   abi_ulong load_bias,
                                   abi_ulong interp_load_addr, int ibcs,
                                   struct image_info *info)
{
    abi_ulong sp;
    int size;
    abi_ulong u_platform;
    const char *k_platform;
    const int n = sizeof(elf_addr_t);

    sp = p;
    u_platform = 0;
    k_platform = ELF_PLATFORM;
    if (k_platform) {
        /* Copy the platform string onto the stack, n-byte aligned. */
        size_t len = strlen(k_platform) + 1;
        sp -= (len + n - 1) & ~(n - 1);
        u_platform = sp;
        /* FIXME - check return value of memcpy_to_target() for failure */
        memcpy_to_target(sp, k_platform, len);
    }
    /*
     * Force 16 byte _final_ alignment here for generality.
     */
    sp = sp &~ (abi_ulong)15;
    /* Pre-compute the total byte size of everything still to be pushed
       so sp can be padded now to keep the final sp 16-byte aligned. */
    size = (DLINFO_ITEMS + 1) * 2;
    if (k_platform)
        size += 2;
#ifdef DLINFO_ARCH_ITEMS
    size += DLINFO_ARCH_ITEMS * 2;
#endif
    size += envc + argc + 2;
    size += (!ibcs ? 3 : 1);    /* argc itself */
    size *= n;
    if (size & 15)
        sp -= 16 - (size & 15);

    /* This is correct because Linux defines
     * elf_addr_t as Elf32_Off / Elf64_Off
     */
#define NEW_AUX_ENT(id, val) do {               \
        sp -= n; put_user_ual(val, sp);         \
        sp -= n; put_user_ual(id, sp);          \
    } while(0)

    NEW_AUX_ENT (AT_NULL, 0);   /* pushed first => terminates the auxv */

    /* There must be exactly DLINFO_ITEMS entries here.  */
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
    if (k_platform)
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
#ifdef ARCH_DLINFO
    /*
     * ARCH_DLINFO must come last so platform specific code can enforce
     * special alignment requirements on the AUXV if necessary (eg. PPC).
     */
    ARCH_DLINFO;
#endif
#undef NEW_AUX_ENT

    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
    return sp;
}
822 |
|
823 |
|
824 |
/*
 * Map an ELF dynamic-linker (interpreter) image into guest memory.
 * On success stores the interpreter's load address in *interp_load_addr
 * and returns its relocated entry point; on failure returns ~0
 * (all-ones abi_ulong).  Closes interpreter_fd on the mapping paths.
 */
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
                                 int interpreter_fd,
                                 abi_ulong *interp_load_addr)
{
    struct elf_phdr *elf_phdata  =  NULL;
    struct elf_phdr *eppnt;
    abi_ulong load_addr = 0;
    int load_addr_set = 0;
    int retval;
    abi_ulong last_bss, elf_bss;
    abi_ulong error;
    int i;

    elf_bss = 0;
    last_bss = 0;
    error = 0;

#ifdef BSWAP_NEEDED
    bswap_ehdr(interp_elf_ex);
#endif
    /* First of all, some simple consistency checks */
    if ((interp_elf_ex->e_type != ET_EXEC &&
         interp_elf_ex->e_type != ET_DYN) ||
        !elf_check_arch(interp_elf_ex->e_machine)) {
        return ~((abi_ulong)0UL);
    }


    /* Now read in all of the header information */

    /* Sanity-bound the program header table size. */
    if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
        return ~(abi_ulong)0UL;

    elf_phdata =  (struct elf_phdr *)
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);

    if (!elf_phdata)
        return ~((abi_ulong)0UL);

    /*
     * If the size of this structure has changed, then punt, since
     * we will be doing the wrong thing.
     */
    if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr)) {
        free(elf_phdata);
        return ~((abi_ulong)0UL);
    }

    retval = lseek(interpreter_fd, interp_elf_ex->e_phoff, SEEK_SET);
    if(retval >= 0) {
        retval = read(interpreter_fd,
                      (char *) elf_phdata,
                      sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
    }
    if (retval < 0) {
        perror("load_elf_interp");
        exit(-1);
        /* NOTE(review): the two statements below are unreachable after
           exit(); kept byte-identical from the original. */
        free (elf_phdata);
        return retval;
    }
#ifdef BSWAP_NEEDED
    eppnt = elf_phdata;
    for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
        bswap_phdr(eppnt);
    }
#endif

    if (interp_elf_ex->e_type == ET_DYN) {
        /* in order to avoid hardcoding the interpreter load
           address in qemu, we allocate a big enough memory zone */
        error = target_mmap(0, INTERP_MAP_SIZE,
                            PROT_NONE, MAP_PRIVATE | MAP_ANON,
                            -1, 0);
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }
        load_addr = error;
        load_addr_set = 1;
    }

    /* Map each PT_LOAD segment at its (possibly relocated) address. */
    eppnt = elf_phdata;
    for(i=0; i<interp_elf_ex->e_phnum; i++, eppnt++)
        if (eppnt->p_type == PT_LOAD) {
            int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
            int elf_prot = 0;
            abi_ulong vaddr = 0;
            abi_ulong k;

            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
            if (interp_elf_ex->e_type == ET_EXEC || load_addr_set) {
                elf_type |= MAP_FIXED;
                vaddr = eppnt->p_vaddr;
            }
            error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
                                eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
                                elf_prot,
                                elf_type,
                                interpreter_fd,
                                eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));

            if (error == -1) {
                /* Real error */
                close(interpreter_fd);
                free(elf_phdata);
                return ~((abi_ulong)0UL);
            }

            if (!load_addr_set && interp_elf_ex->e_type == ET_DYN) {
                load_addr = error;
                load_addr_set = 1;
            }

            /*
             * Find the end of the file mapping for this phdr, and keep
             * track of the largest address we see for this.
             */
            k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
            if (k > elf_bss) elf_bss = k;

            /*
             * Do the same thing for the memory mapping - between
             * elf_bss and last_bss is the bss section.
             */
            k = load_addr + eppnt->p_memsz + eppnt->p_vaddr;
            if (k > last_bss) last_bss = k;
        }

    /* Now use mmap to map the library into memory. */

    close(interpreter_fd);

    /*
     * Now fill out the bss section.  First pad the last page up
     * to the page boundary, and then perform a mmap to make sure
     * that there are zeromapped pages up to and including the last
     * bss page.
     */
    padzero(elf_bss, last_bss);
    elf_bss = TARGET_ELF_PAGESTART(elf_bss + qemu_host_page_size - 1); /* What we have mapped so far */

    /* Map the last of the bss segment */
    if (last_bss > elf_bss) {
        target_mmap(elf_bss, last_bss-elf_bss,
                    PROT_READ|PROT_WRITE|PROT_EXEC,
                    MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    }
    free(elf_phdata);

    *interp_load_addr = load_addr;
    return ((abi_ulong) interp_elf_ex->e_entry) + load_addr;
}
978 |
|
979 |
/* Best attempt to load symbols from this ELF object. */
|
980 |
static void load_symbols(struct elfhdr *hdr, int fd) |
981 |
{ |
982 |
unsigned int i; |
983 |
struct elf_shdr sechdr, symtab, strtab;
|
984 |
char *strings;
|
985 |
struct syminfo *s;
|
986 |
#if (ELF_CLASS == ELFCLASS64)
|
987 |
// Disas uses 32 bit symbols
|
988 |
struct elf32_sym *syms32 = NULL; |
989 |
struct elf_sym *sym;
|
990 |
#endif
|
991 |
|
992 |
lseek(fd, hdr->e_shoff, SEEK_SET); |
993 |
for (i = 0; i < hdr->e_shnum; i++) { |
994 |
if (read(fd, &sechdr, sizeof(sechdr)) != sizeof(sechdr)) |
995 |
return;
|
996 |
#ifdef BSWAP_NEEDED
|
997 |
bswap_shdr(&sechdr); |
998 |
#endif
|
999 |
if (sechdr.sh_type == SHT_SYMTAB) {
|
1000 |
symtab = sechdr; |
1001 |
lseek(fd, hdr->e_shoff |
1002 |
+ sizeof(sechdr) * sechdr.sh_link, SEEK_SET);
|
1003 |
if (read(fd, &strtab, sizeof(strtab)) |
1004 |
!= sizeof(strtab))
|
1005 |
return;
|
1006 |
#ifdef BSWAP_NEEDED
|
1007 |
bswap_shdr(&strtab); |
1008 |
#endif
|
1009 |
goto found;
|
1010 |
} |
1011 |
} |
1012 |
return; /* Shouldn't happen... */ |
1013 |
|
1014 |
found:
|
1015 |
/* Now know where the strtab and symtab are. Snarf them. */
|
1016 |
s = malloc(sizeof(*s));
|
1017 |
s->disas_symtab = malloc(symtab.sh_size); |
1018 |
#if (ELF_CLASS == ELFCLASS64)
|
1019 |
syms32 = malloc(symtab.sh_size / sizeof(struct elf_sym) |
1020 |
* sizeof(struct elf32_sym)); |
1021 |
#endif
|
1022 |
s->disas_strtab = strings = malloc(strtab.sh_size); |
1023 |
if (!s->disas_symtab || !s->disas_strtab)
|
1024 |
return;
|
1025 |
|
1026 |
lseek(fd, symtab.sh_offset, SEEK_SET); |
1027 |
if (read(fd, s->disas_symtab, symtab.sh_size) != symtab.sh_size)
|
1028 |
return;
|
1029 |
|
1030 |
for (i = 0; i < symtab.sh_size / sizeof(struct elf_sym); i++) { |
1031 |
#ifdef BSWAP_NEEDED
|
1032 |
bswap_sym(s->disas_symtab + sizeof(struct elf_sym)*i); |
1033 |
#endif
|
1034 |
#if (ELF_CLASS == ELFCLASS64)
|
1035 |
sym = s->disas_symtab + sizeof(struct elf_sym)*i; |
1036 |
syms32[i].st_name = sym->st_name; |
1037 |
syms32[i].st_info = sym->st_info; |
1038 |
syms32[i].st_other = sym->st_other; |
1039 |
syms32[i].st_shndx = sym->st_shndx; |
1040 |
syms32[i].st_value = sym->st_value & 0xffffffff;
|
1041 |
syms32[i].st_size = sym->st_size & 0xffffffff;
|
1042 |
#endif
|
1043 |
} |
1044 |
|
1045 |
#if (ELF_CLASS == ELFCLASS64)
|
1046 |
free(s->disas_symtab); |
1047 |
s->disas_symtab = syms32; |
1048 |
#endif
|
1049 |
lseek(fd, strtab.sh_offset, SEEK_SET); |
1050 |
if (read(fd, strings, strtab.sh_size) != strtab.sh_size)
|
1051 |
return;
|
1052 |
s->disas_num_syms = symtab.sh_size / sizeof(struct elf_sym); |
1053 |
s->next = syminfos; |
1054 |
syminfos = s; |
1055 |
} |
1056 |
|
1057 |
/* Load the ELF executable described by bprm into the guest address
 * space, load its PT_INTERP dynamic linker if it has one, build the
 * initial stack (argv/envp/auxv), and fill in 'info' for the caller.
 *
 * Returns 0 on success or a negative errno-style value on the early
 * failure paths; several later failures call exit(-1) directly.
 * 'regs' is accepted but never referenced in this function.
 */
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
                    struct image_info * info)
{
    struct elfhdr elf_ex;
    struct elfhdr interp_elf_ex;
    struct exec interp_ex;
    int interpreter_fd = -1; /* avoid warning */
    abi_ulong load_addr, load_bias;
    int load_addr_set = 0;
    unsigned int interpreter_type = INTERPRETER_NONE;
    unsigned char ibcs2_interpreter;
    int i;
    abi_ulong mapped_addr;          /* NOTE(review): written but never read */
    struct elf_phdr * elf_ppnt;
    struct elf_phdr *elf_phdata;
    abi_ulong elf_bss, k, elf_brk;
    int retval;
    char * elf_interpreter;
    abi_ulong elf_entry, interp_load_addr = 0;
    int status;                     /* NOTE(review): set once, never used */
    abi_ulong start_code, end_code, start_data, end_data;
    abi_ulong reloc_func_desc = 0;
    abi_ulong elf_stack;
    char passed_fileno[6];

    ibcs2_interpreter = 0;
    status = 0;
    load_addr = 0;
    load_bias = 0;
    /* The first 128 bytes of the binary were already read into bprm->buf. */
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
#ifdef BSWAP_NEEDED
    bswap_ehdr(&elf_ex);
#endif

    /* First of all, some simple consistency checks */
    if ((elf_ex.e_type != ET_EXEC && elf_ex.e_type != ET_DYN) ||
        (! elf_check_arch(elf_ex.e_machine))) {
        return -ENOEXEC;
    }

    /* Copy filename, environment and argument strings into the
       argument pages; bprm->p tracks the remaining stack space. */
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
    bprm->p = copy_elf_strings(bprm->envc,bprm->envp,bprm->page,bprm->p);
    bprm->p = copy_elf_strings(bprm->argc,bprm->argv,bprm->page,bprm->p);
    if (!bprm->p) {
        /* NOTE(review): retval is assigned but there is no return here,
           and retval is overwritten by the lseek below — the -E2BIG case
           is effectively lost (a later !bprm->p check does return it). */
        retval = -E2BIG;
    }

    /* Now read in all of the header information */
    elf_phdata = (struct elf_phdr *)malloc(elf_ex.e_phentsize*elf_ex.e_phnum);
    if (elf_phdata == NULL) {
        return -ENOMEM;
    }

    retval = lseek(bprm->fd, elf_ex.e_phoff, SEEK_SET);
    /* NOTE(review): '> 0' skips the read when e_phoff == 0, leaving
       elf_phdata uninitialized; '>= 0' was presumably intended. */
    if(retval > 0) {
        retval = read(bprm->fd, (char *) elf_phdata,
                      elf_ex.e_phentsize * elf_ex.e_phnum);
    }

    if (retval < 0) {
        perror("load_elf_binary");
        exit(-1);
        /* NOTE(review): unreachable after exit(-1). */
        free (elf_phdata);
        return -errno;
    }

#ifdef BSWAP_NEEDED
    elf_ppnt = elf_phdata;
    for (i=0; i<elf_ex.e_phnum; i++, elf_ppnt++) {
        bswap_phdr(elf_ppnt);
    }
#endif
    elf_ppnt = elf_phdata;

    elf_bss = 0;
    elf_brk = 0;


    elf_stack = ~((abi_ulong)0UL);
    elf_interpreter = NULL;
    start_code = ~((abi_ulong)0UL);
    end_code = 0;
    start_data = 0;
    end_data = 0;

    /* First pass over the program headers: find PT_INTERP and read the
       interpreter's own exec header into interp_ex / interp_elf_ex. */
    for(i=0;i < elf_ex.e_phnum; i++) {
        if (elf_ppnt->p_type == PT_INTERP) {
            /* A valid binary has at most one PT_INTERP segment. */
            if ( elf_interpreter != NULL )
            {
                free (elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return -EINVAL;
            }

            /* This is the program interpreter used for
             * shared libraries - for now assume that this
             * is an a.out format binary
             */

            /* NOTE(review): assumes p_filesz includes the trailing NUL;
               a malformed binary without it would make the strcmp calls
               below read past the buffer — confirm against callers. */
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);

            if (elf_interpreter == NULL) {
                free (elf_phdata);
                close(bprm->fd);
                return -ENOMEM;
            }

            retval = lseek(bprm->fd, elf_ppnt->p_offset, SEEK_SET);
            if(retval >= 0) {
                retval = read(bprm->fd, elf_interpreter, elf_ppnt->p_filesz);
            }
            if(retval < 0) {
                perror("load_elf_binary2");
                exit(-1);
            }

            /* If the program interpreter is one of these two,
               then assume an iBCS2 image. Otherwise assume
               a native linux image. */

            /* JRP - Need to add X86 lib dir stuff here... */

            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
                ibcs2_interpreter = 1;
            }

#if 0
            printf("Using ELF interpreter %s\n", elf_interpreter);
#endif
            if (retval >= 0) {
                /* path() maps the guest path into the sysroot prefix. */
                retval = open(path(elf_interpreter), O_RDONLY);
                if(retval >= 0) {
                    interpreter_fd = retval;
                }
                else {
                    perror(elf_interpreter);
                    exit(-1);
                    /* retval = -errno; */
                }
            }

            if (retval >= 0) {
                retval = lseek(interpreter_fd, 0, SEEK_SET);
                if(retval >= 0) {
                    /* Overwrites bprm->buf: the main binary's header has
                       already been copied into elf_ex above. */
                    retval = read(interpreter_fd,bprm->buf,128);
                }
            }
            if (retval >= 0) {
                interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
                interp_elf_ex=*((struct elfhdr *) bprm->buf); /* elf exec-header */
            }
            if (retval < 0) {
                perror("load_elf_binary3");
                exit(-1);
                /* NOTE(review): unreachable after exit(-1). */
                free (elf_phdata);
                free(elf_interpreter);
                close(bprm->fd);
                return retval;
            }
        }
        elf_ppnt++;
    }

    /* Some simple consistency checks for the interpreter */
    if (elf_interpreter){
        /* Start with both format bits set, then strip the ones that the
           header checks rule out. */
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;

        /* Now figure out which format our binary is */
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
            (N_MAGIC(interp_ex) != QMAGIC)) {
            interpreter_type = INTERPRETER_ELF;
        }

        if (interp_elf_ex.e_ident[0] != 0x7f ||
            strncmp(&interp_elf_ex.e_ident[1], "ELF",3) != 0) {
            interpreter_type &= ~INTERPRETER_ELF;
        }

        if (!interpreter_type) {
            free(elf_interpreter);
            free(elf_phdata);
            close(bprm->fd);
            return -ELIBBAD;
        }
    }

    /* OK, we are done with that, now set up the arg stuff,
       and then start this sucker up */

    {
        char * passed_p;

        if (interpreter_type == INTERPRETER_AOUT) {
            /* a.out interpreters receive the binary's fd number as an
               extra argv entry. */
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
            passed_p = passed_fileno;

            if (elf_interpreter) {
                bprm->p = copy_elf_strings(1,&passed_p,bprm->page,bprm->p);
                bprm->argc++;
            }
        }
        if (!bprm->p) {
            if (elf_interpreter) {
                free(elf_interpreter);
            }
            free (elf_phdata);
            close(bprm->fd);
            return -E2BIG;
        }
    }

    /* OK, This is the point of no return */
    info->end_data = 0;
    info->end_code = 0;
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
    info->mmap = 0;
    elf_entry = (abi_ulong) elf_ex.e_entry;

    /* Do this so that we can load the interpreter, if need be. We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at fixed address, not at a variable
     * address.
     */

    /* Second pass: map every PT_LOAD segment and track the code/data/
       bss extents. */
    for(i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        /* Map the segment at its page-aligned address; offset and length
           are widened so the file offset stays congruent with the vaddr
           modulo the page size. */
        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                /* Fold the actual mapping address into the bias so later
                   segments land relative to the first one. */
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if (k > elf_bss)
            elf_bss = k;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) elf_brk = k;
    }

    /* Relocate all tracked addresses by the load bias (zero for ET_EXEC). */
    elf_entry += load_bias;
    elf_bss += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        /* NOTE(review): the literal bit tests 1 and 2 presumably mirror
           INTERPRETER_AOUT and INTERPRETER_ELF — verify the enum values. */
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        }
        else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        /* ~0 is the loaders' error sentinel. */
        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            /* NOTE(review): unreachable after exit(-1). */
            return 0;
        }
    }

    free(elf_phdata);

    if (loglevel)
        load_symbols(&elf_ex, bprm->fd);

    /* The a.out interpreter case passed bprm->fd to the interpreter, so
       keep it open only in that case. */
    if (interpreter_type != INTERPRETER_AOUT) close(bprm->fd);
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                    bprm->argc,
                    bprm->envc,
                    &elf_ex,
                    load_addr, load_bias,
                    interp_load_addr,
                    (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                    info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

    /* Calling set_brk effectively mmaps the pages that we need for the bss and break
       sections */
    set_brk(elf_bss, elf_brk);

    padzero(elf_bss, elf_brk);

#if 0
    printf("(start_brk) %x\n" , info->start_brk);
    printf("(end_code) %x\n" , info->end_code);
    printf("(start_code) %x\n" , info->start_code);
    printf("(end_data) %x\n" , info->end_data);
    printf("(start_stack) %x\n" , info->start_stack);
    printf("(brk) %x\n" , info->brk);
#endif

    if ( info->personality == PER_SVR4 )
    {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

    return 0;
}
1448 |
|
1449 |
/* Stub for a.out-format interpreter support: report that it is not
 * implemented and return an entry point of 0.  Both parameters are
 * accepted for interface compatibility but ignored. */
static int load_aout_interp(void * exptr, int interp_fd)
{
    (void)exptr;
    (void)interp_fd;
    printf("a.out interpreter not yet supported\n");
    return 0;
}
1454 |
|
1455 |
/* Thin public wrapper: forwards the register block and the loaded-image
 * description to init_thread() (declared elsewhere; presumably the
 * per-target initial-CPU-state setup — confirm in the target headers). */
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}