linux-user/elfload.c @ 60dcbcb5

1
/* This is the Linux kernel elf-loading code, ported into user space */
2
#include <sys/time.h>
3
#include <sys/param.h>
4

    
5
#include <stdio.h>
6
#include <sys/types.h>
7
#include <fcntl.h>
8
#include <errno.h>
9
#include <unistd.h>
10
#include <sys/mman.h>
11
#include <sys/resource.h>
12
#include <stdlib.h>
13
#include <string.h>
14
#include <time.h>
15

    
16
#include "qemu.h"
17
#include "disas.h"
18

    
19
#ifdef _ARCH_PPC64
20
#undef ARCH_DLINFO
21
#undef ELF_PLATFORM
22
#undef ELF_HWCAP
23
#undef ELF_CLASS
24
#undef ELF_DATA
25
#undef ELF_ARCH
26
#endif
27

    
28
#define ELF_OSABI   ELFOSABI_SYSV
29

    
30
/* from personality.h */
31

    
32
/*
33
 * Flags for bug emulation.
34
 *
35
 * These occupy the top three bytes.
36
 */
37
enum {
38
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
39
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
40
                                           descriptors (signal handling) */
41
    MMAP_PAGE_ZERO =    0x0100000,
42
    ADDR_COMPAT_LAYOUT = 0x0200000,
43
    READ_IMPLIES_EXEC = 0x0400000,
44
    ADDR_LIMIT_32BIT =  0x0800000,
45
    SHORT_INODE =       0x1000000,
46
    WHOLE_SECONDS =     0x2000000,
47
    STICKY_TIMEOUTS =   0x4000000,
48
    ADDR_LIMIT_3GB =    0x8000000,
49
};
50

    
51
/*
52
 * Personality types.
53
 *
54
 * These go in the low byte.  Avoid using the top bit, it will
55
 * conflict with error returns.
56
 */
57
enum {
58
    PER_LINUX =         0x0000,
59
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
60
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
61
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
67
    PER_BSD =           0x0006,
68
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
69
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70
    PER_LINUX32 =       0x0008,
71
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
72
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75
    PER_RISCOS =        0x000c,
76
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
77
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78
    PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
79
    PER_HPUX =          0x0010,
80
    PER_MASK =          0x00ff,
81
};
82

    
83
/*
84
 * Return the base personality without flags.
85
 */
86
#define personality(pers)       (pers & PER_MASK)
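/* E.g. personality(PER_LINUX32_3GB) == PER_LINUX32: the flag bits in the
   top bytes are masked off, leaving only the base personality. */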
87

    
88
/* This flag is ineffective under Linux too; it should be deleted. */
89
#ifndef MAP_DENYWRITE
90
#define MAP_DENYWRITE 0
91
#endif
92

    
93
/* should probably go in elf.h */
94
#ifndef ELIBBAD
95
#define ELIBBAD 80
96
#endif
97

    
98
#ifdef TARGET_WORDS_BIGENDIAN
99
#define ELF_DATA        ELFDATA2MSB
100
#else
101
#define ELF_DATA        ELFDATA2LSB
102
#endif
103

    
104
typedef target_ulong    target_elf_greg_t;
105
#ifdef USE_UID16
106
typedef uint16_t        target_uid_t;
107
typedef uint16_t        target_gid_t;
108
#else
109
typedef uint32_t        target_uid_t;
110
typedef uint32_t        target_gid_t;
111
#endif
112
typedef int32_t         target_pid_t;
113

    
114
#ifdef TARGET_I386
115

    
116
#define ELF_PLATFORM get_elf_platform()
117

    
118
static const char *get_elf_platform(void)
119
{
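    /* Report "i386" .. "i686" based on the emulated CPUID family,
       capped at 6; families below 3 leave the default "i386". */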
120
    static char elf_platform[] = "i386";
121
    int family = (thread_env->cpuid_version >> 8) & 0xff;
122
    if (family > 6)
123
        family = 6;
124
    if (family >= 3)
125
        elf_platform[1] = '0' + family;
126
    return elf_platform;
127
}
128

    
129
#define ELF_HWCAP get_elf_hwcap()
130

    
131
static uint32_t get_elf_hwcap(void)
132
{
133
    return thread_env->cpuid_features;
134
}
135

    
136
#ifdef TARGET_X86_64
137
#define ELF_START_MMAP 0x2aaaaab000ULL
138
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )
139

    
140
#define ELF_CLASS      ELFCLASS64
141
#define ELF_ARCH       EM_X86_64
142

    
143
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
144
{
145
    regs->rax = 0;
146
    regs->rsp = infop->start_stack;
147
    regs->rip = infop->entry;
148
}
149

    
150
#define ELF_NREG    27
151
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
152

    
153
/*
154
 * Note that ELF_NREG should be 29 as there should be place for
155
 * TRAPNO and ERR "registers" as well but linux doesn't dump
156
 * those.
157
 *
158
 * See linux kernel: arch/x86/include/asm/elf.h
159
 */
160
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
161
{
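    /* The slot order mirrors the kernel's x86-64 core-dump register layout
       (see the elf.h reference above); slot 15 stands in for orig_rax,
       for which plain rax is used here. */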
162
    (*regs)[0] = env->regs[15];
163
    (*regs)[1] = env->regs[14];
164
    (*regs)[2] = env->regs[13];
165
    (*regs)[3] = env->regs[12];
166
    (*regs)[4] = env->regs[R_EBP];
167
    (*regs)[5] = env->regs[R_EBX];
168
    (*regs)[6] = env->regs[11];
169
    (*regs)[7] = env->regs[10];
170
    (*regs)[8] = env->regs[9];
171
    (*regs)[9] = env->regs[8];
172
    (*regs)[10] = env->regs[R_EAX];
173
    (*regs)[11] = env->regs[R_ECX];
174
    (*regs)[12] = env->regs[R_EDX];
175
    (*regs)[13] = env->regs[R_ESI];
176
    (*regs)[14] = env->regs[R_EDI];
177
    (*regs)[15] = env->regs[R_EAX]; /* XXX */
178
    (*regs)[16] = env->eip;
179
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
180
    (*regs)[18] = env->eflags;
181
    (*regs)[19] = env->regs[R_ESP];
182
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
183
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
184
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
185
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
186
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
187
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
188
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
189
}
190

    
191
#else
192

    
193
#define ELF_START_MMAP 0x80000000
194

    
195
/*
196
 * This is used to ensure we don't load something for the wrong architecture.
197
 */
198
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
199

    
200
/*
201
 * These are used to set parameters in the core dumps.
202
 */
203
#define ELF_CLASS       ELFCLASS32
204
#define ELF_ARCH        EM_386
205

    
206
static inline void init_thread(struct target_pt_regs *regs,
207
                               struct image_info *infop)
208
{
209
    regs->esp = infop->start_stack;
210
    regs->eip = infop->entry;
211

    
212
    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213
       starts %edx contains a pointer to a function which might be
214
       registered using `atexit'.  This provides a means for the
215
       dynamic linker to call DT_FINI functions for shared libraries
216
       that have been loaded before the code runs.
217

218
       A value of 0 means we have no such handler.  */
219
    regs->edx = 0;
220
}
221

    
222
#define ELF_NREG    17
223
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
224

    
225
/*
226
 * Note that ELF_NREG should be 19, as there should be room for
227
 * TRAPNO and ERR "registers" as well but linux doesn't dump
228
 * those.
229
 *
230
 * See linux kernel: arch/x86/include/asm/elf.h
231
 */
232
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
233
{
234
    (*regs)[0] = env->regs[R_EBX];
235
    (*regs)[1] = env->regs[R_ECX];
236
    (*regs)[2] = env->regs[R_EDX];
237
    (*regs)[3] = env->regs[R_ESI];
238
    (*regs)[4] = env->regs[R_EDI];
239
    (*regs)[5] = env->regs[R_EBP];
240
    (*regs)[6] = env->regs[R_EAX];
241
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
242
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
243
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
244
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
245
    (*regs)[11] = env->regs[R_EAX]; /* XXX */
246
    (*regs)[12] = env->eip;
247
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
248
    (*regs)[14] = env->eflags;
249
    (*regs)[15] = env->regs[R_ESP];
250
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
251
}
252
#endif
253

    
254
#define USE_ELF_CORE_DUMP
255
#define ELF_EXEC_PAGESIZE       4096
256

    
257
#endif
258

    
259
#ifdef TARGET_ARM
260

    
261
#define ELF_START_MMAP 0x80000000
262

    
263
#define elf_check_arch(x) ( (x) == EM_ARM )
264

    
265
#define ELF_CLASS       ELFCLASS32
266
#define ELF_ARCH        EM_ARM
267

    
268
static inline void init_thread(struct target_pt_regs *regs,
269
                               struct image_info *infop)
270
{
271
    abi_long stack = infop->start_stack;
272
    memset(regs, 0, sizeof(*regs));
273
    regs->ARM_cpsr = 0x10;
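    /* A set low bit in the entry address indicates a Thumb entry point:
       set the Thumb bit in CPSR and mask the bit off the PC below. */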
274
    if (infop->entry & 1)
275
        regs->ARM_cpsr |= CPSR_T;
276
    regs->ARM_pc = infop->entry & 0xfffffffe;
277
    regs->ARM_sp = infop->start_stack;
278
    /* FIXME - what to do on failure of get_user()? */
279
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
280
    get_user_ual(regs->ARM_r1, stack + 4); /* argv */
281
    /* XXX: it seems that r0 is zeroed after ! */
282
    regs->ARM_r0 = 0;
283
    /* For uClinux PIC binaries.  */
284
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
285
    regs->ARM_r10 = infop->start_data;
286
}
287

    
288
#define ELF_NREG    18
289
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
290

    
291
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
292
{
293
    (*regs)[0] = tswapl(env->regs[0]);
294
    (*regs)[1] = tswapl(env->regs[1]);
295
    (*regs)[2] = tswapl(env->regs[2]);
296
    (*regs)[3] = tswapl(env->regs[3]);
297
    (*regs)[4] = tswapl(env->regs[4]);
298
    (*regs)[5] = tswapl(env->regs[5]);
299
    (*regs)[6] = tswapl(env->regs[6]);
300
    (*regs)[7] = tswapl(env->regs[7]);
301
    (*regs)[8] = tswapl(env->regs[8]);
302
    (*regs)[9] = tswapl(env->regs[9]);
303
    (*regs)[10] = tswapl(env->regs[10]);
304
    (*regs)[11] = tswapl(env->regs[11]);
305
    (*regs)[12] = tswapl(env->regs[12]);
306
    (*regs)[13] = tswapl(env->regs[13]);
307
    (*regs)[14] = tswapl(env->regs[14]);
308
    (*regs)[15] = tswapl(env->regs[15]);
309

    
310
    (*regs)[16] = tswapl(cpsr_read((CPUState *)env));
311
    (*regs)[17] = tswapl(env->regs[0]); /* XXX */
312
}
313

    
314
#define USE_ELF_CORE_DUMP
315
#define ELF_EXEC_PAGESIZE       4096
316

    
317
enum
318
{
319
    ARM_HWCAP_ARM_SWP       = 1 << 0,
320
    ARM_HWCAP_ARM_HALF      = 1 << 1,
321
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
322
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
323
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
324
    ARM_HWCAP_ARM_FPA       = 1 << 5,
325
    ARM_HWCAP_ARM_VFP       = 1 << 6,
326
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
327
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
328
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
329
    ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
330
    ARM_HWCAP_ARM_NEON      = 1 << 11,
331
    ARM_HWCAP_ARM_VFPv3     = 1 << 12,
332
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
333
};
334

    
335
#define ELF_HWCAP (ARM_HWCAP_ARM_SWP | ARM_HWCAP_ARM_HALF               \
336
                   | ARM_HWCAP_ARM_THUMB | ARM_HWCAP_ARM_FAST_MULT      \
337
                   | ARM_HWCAP_ARM_FPA | ARM_HWCAP_ARM_VFP              \
338
                   | ARM_HWCAP_ARM_NEON | ARM_HWCAP_ARM_VFPv3 )
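/* Note: this hwcap set is advertised unconditionally; it is not derived
   from the particular CPU model being emulated. */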
339

    
340
#endif
341

    
342
#ifdef TARGET_SPARC
343
#ifdef TARGET_SPARC64
344

    
345
#define ELF_START_MMAP 0x80000000
346

    
347
#ifndef TARGET_ABI32
348
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
349
#else
350
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
351
#endif
352

    
353
#define ELF_CLASS   ELFCLASS64
354
#define ELF_ARCH    EM_SPARCV9
355

    
356
#define STACK_BIAS              2047
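/* The SPARC V9 ABI keeps %sp biased by 2047 bytes, so the real stack frame
   lives at %sp + STACK_BIAS; the 64-bit case below subtracts the bias when
   setting up the initial stack pointer. */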
357

    
358
static inline void init_thread(struct target_pt_regs *regs,
359
                               struct image_info *infop)
360
{
361
#ifndef TARGET_ABI32
362
    regs->tstate = 0;
363
#endif
364
    regs->pc = infop->entry;
365
    regs->npc = regs->pc + 4;
366
    regs->y = 0;
367
#ifdef TARGET_ABI32
368
    regs->u_regs[14] = infop->start_stack - 16 * 4;
369
#else
370
    if (personality(infop->personality) == PER_LINUX32)
371
        regs->u_regs[14] = infop->start_stack - 16 * 4;
372
    else
373
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
374
#endif
375
}
376

    
377
#else
378
#define ELF_START_MMAP 0x80000000
379

    
380
#define elf_check_arch(x) ( (x) == EM_SPARC )
381

    
382
#define ELF_CLASS   ELFCLASS32
383
#define ELF_ARCH    EM_SPARC
384

    
385
static inline void init_thread(struct target_pt_regs *regs,
386
                               struct image_info *infop)
387
{
388
    regs->psr = 0;
389
    regs->pc = infop->entry;
390
    regs->npc = regs->pc + 4;
391
    regs->y = 0;
392
    regs->u_regs[14] = infop->start_stack - 16 * 4;
393
}
394

    
395
#endif
396
#endif
397

    
398
#ifdef TARGET_PPC
399

    
400
#define ELF_START_MMAP 0x80000000
401

    
402
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
403

    
404
#define elf_check_arch(x) ( (x) == EM_PPC64 )
405

    
406
#define ELF_CLASS       ELFCLASS64
407

    
408
#else
409

    
410
#define elf_check_arch(x) ( (x) == EM_PPC )
411

    
412
#define ELF_CLASS       ELFCLASS32
413

    
414
#endif
415

    
416
#define ELF_ARCH        EM_PPC
417

    
418
/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
419
   See arch/powerpc/include/asm/cputable.h.  */
420
enum {
421
    QEMU_PPC_FEATURE_32 = 0x80000000,
422
    QEMU_PPC_FEATURE_64 = 0x40000000,
423
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
424
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
425
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
426
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
427
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
428
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
429
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
430
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
431
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
432
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
433
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
434
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
435
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
436
    QEMU_PPC_FEATURE_CELL = 0x00010000,
437
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
438
    QEMU_PPC_FEATURE_SMT = 0x00004000,
439
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
440
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
441
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
442
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
443
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
444
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
445
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
446
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
447

    
448
    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
449
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
450
};
451

    
452
#define ELF_HWCAP get_elf_hwcap()
453

    
454
static uint32_t get_elf_hwcap(void)
455
{
456
    CPUState *e = thread_env;
457
    uint32_t features = 0;
458

    
459
    /* We don't have to be terribly complete here; the high points are
460
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
461
#define GET_FEATURE(flag, feature)                                      \
462
    do {if (e->insns_flags & flag) features |= feature; } while(0)
463
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
464
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
465
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
466
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
467
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
468
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
469
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
470
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
471
#undef GET_FEATURE
472

    
473
    return features;
474
}
475

    
476
/*
477
 * The requirements here are:
478
 * - keep the final alignment of sp (sp & 0xf)
479
 * - make sure the 32-bit value at the first 16 byte aligned position of
480
 *   AUXV is greater than 16 for glibc compatibility.
481
 *   AT_IGNOREPPC is used for that.
482
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
483
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
484
 */
485
#define DLINFO_ARCH_ITEMS       5
486
#define ARCH_DLINFO                                     \
487
    do {                                                \
488
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
489
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
490
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
491
        /*                                              \
492
         * Now handle glibc compatibility.              \
493
         */                                             \
494
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
495
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
496
    } while (0)
497

    
498
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
499
{
500
    _regs->gpr[1] = infop->start_stack;
501
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
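    /* On 64-bit PPC the ELF entry point is the address of a function
       descriptor: the first doubleword holds the real code address and
       the second the TOC pointer, which is loaded into r2. */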
502
    _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_addr;
503
    infop->entry = ldq_raw(infop->entry) + infop->load_addr;
504
#endif
505
    _regs->nip = infop->entry;
506
}
507

    
508
/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
509
#define ELF_NREG 48
510
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
511

    
512
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
513
{
514
    int i;
515
    target_ulong ccr = 0;
516

    
517
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
518
        (*regs)[i] = tswapl(env->gpr[i]);
519
    }
520

    
521
    (*regs)[32] = tswapl(env->nip);
522
    (*regs)[33] = tswapl(env->msr);
523
    (*regs)[35] = tswapl(env->ctr);
524
    (*regs)[36] = tswapl(env->lr);
525
    (*regs)[37] = tswapl(env->xer);
526

    
527
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
528
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
529
    }
530
    (*regs)[38] = tswapl(ccr);
531
}
532

    
533
#define USE_ELF_CORE_DUMP
534
#define ELF_EXEC_PAGESIZE       4096
535

    
536
#endif
537

    
538
#ifdef TARGET_MIPS
539

    
540
#define ELF_START_MMAP 0x80000000
541

    
542
#define elf_check_arch(x) ( (x) == EM_MIPS )
543

    
544
#ifdef TARGET_MIPS64
545
#define ELF_CLASS   ELFCLASS64
546
#else
547
#define ELF_CLASS   ELFCLASS32
548
#endif
549
#define ELF_ARCH    EM_MIPS
550

    
551
static inline void init_thread(struct target_pt_regs *regs,
552
                               struct image_info *infop)
553
{
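    /* Start in user mode (KSU field = 2), with the PC at the entry point
       and $29 (the stack pointer) at the top of the initial stack. */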
554
    regs->cp0_status = 2 << CP0St_KSU;
555
    regs->cp0_epc = infop->entry;
556
    regs->regs[29] = infop->start_stack;
557
}
558

    
559
/* See linux kernel: arch/mips/include/asm/elf.h.  */
560
#define ELF_NREG 45
561
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
562

    
563
/* See linux kernel: arch/mips/include/asm/reg.h.  */
564
enum {
565
#ifdef TARGET_MIPS64
566
    TARGET_EF_R0 = 0,
567
#else
568
    TARGET_EF_R0 = 6,
569
#endif
570
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
571
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
572
    TARGET_EF_LO = TARGET_EF_R0 + 32,
573
    TARGET_EF_HI = TARGET_EF_R0 + 33,
574
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
575
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
576
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
577
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
578
};
579

    
580
/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
581
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
582
{
583
    int i;
584

    
585
    for (i = 0; i < TARGET_EF_R0; i++) {
586
        (*regs)[i] = 0;
587
    }
588
    (*regs)[TARGET_EF_R0] = 0;
589

    
590
    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
591
        (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
592
    }
593

    
594
    (*regs)[TARGET_EF_R26] = 0;
595
    (*regs)[TARGET_EF_R27] = 0;
596
    (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
597
    (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
598
    (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
599
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
600
    (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
601
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
602
}
603

    
604
#define USE_ELF_CORE_DUMP
605
#define ELF_EXEC_PAGESIZE        4096
606

    
607
#endif /* TARGET_MIPS */
608

    
609
#ifdef TARGET_MICROBLAZE
610

    
611
#define ELF_START_MMAP 0x80000000
612

    
613
#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
614

    
615
#define ELF_CLASS   ELFCLASS32
616
#define ELF_ARCH    EM_MICROBLAZE
617

    
618
static inline void init_thread(struct target_pt_regs *regs,
619
                               struct image_info *infop)
620
{
621
    regs->pc = infop->entry;
622
    regs->r1 = infop->start_stack;
623

    
624
}
625

    
626
#define ELF_EXEC_PAGESIZE        4096
627

    
628
#define USE_ELF_CORE_DUMP
629
#define ELF_NREG 38
630
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
631

    
632
/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
633
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
634
{
635
    int i, pos = 0;
636

    
637
    for (i = 0; i < 32; i++) {
638
        (*regs)[pos++] = tswapl(env->regs[i]);
639
    }
640

    
641
    for (i = 0; i < 6; i++) {
642
        (*regs)[pos++] = tswapl(env->sregs[i]);
643
    }
644
}
645

    
646
#endif /* TARGET_MICROBLAZE */
647

    
648
#ifdef TARGET_SH4
649

    
650
#define ELF_START_MMAP 0x80000000
651

    
652
#define elf_check_arch(x) ( (x) == EM_SH )
653

    
654
#define ELF_CLASS ELFCLASS32
655
#define ELF_ARCH  EM_SH
656

    
657
static inline void init_thread(struct target_pt_regs *regs,
658
                               struct image_info *infop)
659
{
660
    /* Check other registers XXXXX */
661
    regs->pc = infop->entry;
662
    regs->regs[15] = infop->start_stack;
663
}
664

    
665
/* See linux kernel: arch/sh/include/asm/elf.h.  */
666
#define ELF_NREG 23
667
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
668

    
669
/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
670
enum {
671
    TARGET_REG_PC = 16,
672
    TARGET_REG_PR = 17,
673
    TARGET_REG_SR = 18,
674
    TARGET_REG_GBR = 19,
675
    TARGET_REG_MACH = 20,
676
    TARGET_REG_MACL = 21,
677
    TARGET_REG_SYSCALL = 22
678
};
679

    
680
static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
681
                                      const CPUState *env)
682
{
683
    int i;
684

    
685
    for (i = 0; i < 16; i++) {
686
        (*regs)[i] = tswapl(env->gregs[i]);
687
    }
688

    
689
    (*regs)[TARGET_REG_PC] = tswapl(env->pc);
690
    (*regs)[TARGET_REG_PR] = tswapl(env->pr);
691
    (*regs)[TARGET_REG_SR] = tswapl(env->sr);
692
    (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
693
    (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
694
    (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
695
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
696
}
697

    
698
#define USE_ELF_CORE_DUMP
699
#define ELF_EXEC_PAGESIZE        4096
700

    
701
#endif
702

    
703
#ifdef TARGET_CRIS
704

    
705
#define ELF_START_MMAP 0x80000000
706

    
707
#define elf_check_arch(x) ( (x) == EM_CRIS )
708

    
709
#define ELF_CLASS ELFCLASS32
710
#define ELF_ARCH  EM_CRIS
711

    
712
static inline void init_thread(struct target_pt_regs *regs,
713
                               struct image_info *infop)
714
{
715
    regs->erp = infop->entry;
716
}
717

    
718
#define ELF_EXEC_PAGESIZE        8192
719

    
720
#endif
721

    
722
#ifdef TARGET_M68K
723

    
724
#define ELF_START_MMAP 0x80000000
725

    
726
#define elf_check_arch(x) ( (x) == EM_68K )
727

    
728
#define ELF_CLASS       ELFCLASS32
729
#define ELF_ARCH        EM_68K
730

    
731
/* ??? Does this need to do anything?
732
   #define ELF_PLAT_INIT(_r) */
733

    
734
static inline void init_thread(struct target_pt_regs *regs,
735
                               struct image_info *infop)
736
{
737
    regs->usp = infop->start_stack;
738
    regs->sr = 0;
739
    regs->pc = infop->entry;
740
}
741

    
742
/* See linux kernel: arch/m68k/include/asm/elf.h.  */
743
#define ELF_NREG 20
744
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
745

    
746
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUState *env)
747
{
748
    (*regs)[0] = tswapl(env->dregs[1]);
749
    (*regs)[1] = tswapl(env->dregs[2]);
750
    (*regs)[2] = tswapl(env->dregs[3]);
751
    (*regs)[3] = tswapl(env->dregs[4]);
752
    (*regs)[4] = tswapl(env->dregs[5]);
753
    (*regs)[5] = tswapl(env->dregs[6]);
754
    (*regs)[6] = tswapl(env->dregs[7]);
755
    (*regs)[7] = tswapl(env->aregs[0]);
756
    (*regs)[8] = tswapl(env->aregs[1]);
757
    (*regs)[9] = tswapl(env->aregs[2]);
758
    (*regs)[10] = tswapl(env->aregs[3]);
759
    (*regs)[11] = tswapl(env->aregs[4]);
760
    (*regs)[12] = tswapl(env->aregs[5]);
761
    (*regs)[13] = tswapl(env->aregs[6]);
762
    (*regs)[14] = tswapl(env->dregs[0]);
763
    (*regs)[15] = tswapl(env->aregs[7]);
764
    (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
765
    (*regs)[17] = tswapl(env->sr);
766
    (*regs)[18] = tswapl(env->pc);
767
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
768
}
769

    
770
#define USE_ELF_CORE_DUMP
771
#define ELF_EXEC_PAGESIZE       8192
772

    
773
#endif
774

    
775
#ifdef TARGET_ALPHA
776

    
777
#define ELF_START_MMAP (0x30000000000ULL)
778

    
779
#define elf_check_arch(x) ( (x) == ELF_ARCH )
780

    
781
#define ELF_CLASS      ELFCLASS64
782
#define ELF_ARCH       EM_ALPHA
783

    
784
static inline void init_thread(struct target_pt_regs *regs,
785
                               struct image_info *infop)
786
{
787
    regs->pc = infop->entry;
788
    regs->ps = 8;
789
    regs->usp = infop->start_stack;
790
}
791

    
792
#define ELF_EXEC_PAGESIZE        8192
793

    
794
#endif /* TARGET_ALPHA */
795

    
796
#ifndef ELF_PLATFORM
797
#define ELF_PLATFORM (NULL)
798
#endif
799

    
800
#ifndef ELF_HWCAP
801
#define ELF_HWCAP 0
802
#endif
803

    
804
#ifdef TARGET_ABI32
805
#undef ELF_CLASS
806
#define ELF_CLASS ELFCLASS32
807
#undef bswaptls
808
#define bswaptls(ptr) bswap32s(ptr)
809
#endif
810

    
811
#include "elf.h"
812

    
813
struct exec
814
{
815
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
816
    unsigned int a_text;   /* length of text, in bytes */
817
    unsigned int a_data;   /* length of data, in bytes */
818
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
819
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
820
    unsigned int a_entry;  /* start address */
821
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
822
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
823
};
824

    
825

    
826
#define N_MAGIC(exec) ((exec).a_info & 0xffff)
827
#define OMAGIC 0407
828
#define NMAGIC 0410
829
#define ZMAGIC 0413
830
#define QMAGIC 0314
831

    
832
/* max code+data+bss+brk space allocated to ET_DYN executables */
833
#define ET_DYN_MAP_SIZE (128 * 1024 * 1024)
834

    
835
/* Necessary parameters */
836
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
837
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
838
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
839

    
840
#define INTERPRETER_NONE 0
841
#define INTERPRETER_AOUT 1
842
#define INTERPRETER_ELF 2
843

    
844
#define DLINFO_ITEMS 12
845

    
846
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
847
{
848
    memcpy(to, from, n);
849
}
850

    
851
static int load_aout_interp(void * exptr, int interp_fd);
852

    
853
#ifdef BSWAP_NEEDED
854
static void bswap_ehdr(struct elfhdr *ehdr)
855
{
856
    bswap16s(&ehdr->e_type);            /* Object file type */
857
    bswap16s(&ehdr->e_machine);         /* Architecture */
858
    bswap32s(&ehdr->e_version);         /* Object file version */
859
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
860
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
861
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
862
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
863
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
864
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
865
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
866
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
867
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
868
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
869
}
870

    
871
static void bswap_phdr(struct elf_phdr *phdr, int phnum)
872
{
873
    int i;
874
    for (i = 0; i < phnum; ++i, ++phdr) {
875
        bswap32s(&phdr->p_type);        /* Segment type */
876
        bswap32s(&phdr->p_flags);       /* Segment flags */
877
        bswaptls(&phdr->p_offset);      /* Segment file offset */
878
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
879
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
880
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
881
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
882
        bswaptls(&phdr->p_align);       /* Segment alignment */
883
    }
884
}
885

    
886
static void bswap_shdr(struct elf_shdr *shdr, int shnum)
887
{
888
    int i;
889
    for (i = 0; i < shnum; ++i, ++shdr) {
890
        bswap32s(&shdr->sh_name);
891
        bswap32s(&shdr->sh_type);
892
        bswaptls(&shdr->sh_flags);
893
        bswaptls(&shdr->sh_addr);
894
        bswaptls(&shdr->sh_offset);
895
        bswaptls(&shdr->sh_size);
896
        bswap32s(&shdr->sh_link);
897
        bswap32s(&shdr->sh_info);
898
        bswaptls(&shdr->sh_addralign);
899
        bswaptls(&shdr->sh_entsize);
900
    }
901
}
902

    
903
static void bswap_sym(struct elf_sym *sym)
904
{
905
    bswap32s(&sym->st_name);
906
    bswaptls(&sym->st_value);
907
    bswaptls(&sym->st_size);
908
    bswap16s(&sym->st_shndx);
909
}
910
#else
911
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
912
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
913
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
914
static inline void bswap_sym(struct elf_sym *sym) { }
915
#endif
916

    
917
#ifdef USE_ELF_CORE_DUMP
918
static int elf_core_dump(int, const CPUState *);
919
#endif /* USE_ELF_CORE_DUMP */
920
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
921

    
922
/* Verify the portions of EHDR within E_IDENT for the target.
923
   This can be performed before bswapping the entire header.  */
924
static bool elf_check_ident(struct elfhdr *ehdr)
925
{
926
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
927
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
928
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
929
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
930
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
931
            && ehdr->e_ident[EI_DATA] == ELF_DATA
932
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
933
}
934

    
935
/* Verify the portions of EHDR outside of E_IDENT for the target.
936
   This has to wait until after bswapping the header.  */
937
static bool elf_check_ehdr(struct elfhdr *ehdr)
938
{
939
    return (elf_check_arch(ehdr->e_machine)
940
            && ehdr->e_ehsize == sizeof(struct elfhdr)
941
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
942
            && ehdr->e_shentsize == sizeof(struct elf_shdr)
943
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
944
}
945

    
946
/*
947
 * 'copy_elf_strings()' copies argument/environment strings from user
948
 * memory to free pages in kernel memory. These are in a format ready
949
 * to be put directly into the top of new user memory.
950
 *
951
 */
952
static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
953
                                  abi_ulong p)
954
{
955
    char *tmp, *tmp1, *pag = NULL;
956
    int len, offset = 0;
957

    
958
    if (!p) {
959
        return 0;       /* bullet-proofing */
960
    }
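    /* Strings are copied backwards, from the last argument down towards
       lower addresses, so p ends up at the start of the copied block. */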
961
    while (argc-- > 0) {
962
        tmp = argv[argc];
963
        if (!tmp) {
964
            fprintf(stderr, "VFS: argc is wrong");
965
            exit(-1);
966
        }
967
        tmp1 = tmp;
968
        while (*tmp++);
969
        len = tmp - tmp1;
970
        if (p < len) {  /* this shouldn't happen - 128kB */
971
            return 0;
972
        }
973
        while (len) {
974
            --p; --tmp; --len;
975
            if (--offset < 0) {
976
                offset = p % TARGET_PAGE_SIZE;
977
                pag = (char *)page[p/TARGET_PAGE_SIZE];
978
                if (!pag) {
979
                    pag = (char *)malloc(TARGET_PAGE_SIZE);
                    if (!pag)
                        return 0;
                    memset(pag, 0, TARGET_PAGE_SIZE);
                    page[p/TARGET_PAGE_SIZE] = pag;
984
                }
985
            }
986
            if (len == 0 || offset == 0) {
987
                *(pag + offset) = *tmp;
988
            }
989
            else {
990
                int bytes_to_copy = (len > offset) ? offset : len;
991
                tmp -= bytes_to_copy;
992
                p -= bytes_to_copy;
993
                offset -= bytes_to_copy;
994
                len -= bytes_to_copy;
995
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
996
            }
997
        }
998
    }
999
    return p;
1000
}
1001

    
1002
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
1003
                                 struct image_info *info)
1004
{
1005
    abi_ulong stack_base, size, error, guard;
1006
    int i;
1007

    
1008
    /* Create enough stack to hold everything.  If we don't use
1009
       it for args, we'll use it for something else.  */
1010
    size = guest_stack_size;
1011
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
1012
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1013
    }
1014
    guard = TARGET_PAGE_SIZE;
1015
    if (guard < qemu_real_host_page_size) {
1016
        guard = qemu_real_host_page_size;
1017
    }
1018

    
1019
    error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1020
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1021
    if (error == -1) {
1022
        perror("mmap stack");
1023
        exit(-1);
1024
    }
1025

    
1026
    /* We reserve one extra page at the top of the stack as guard.  */
1027
    target_mprotect(error, guard, PROT_NONE);
1028

    
1029
    info->stack_limit = error + guard;
1030
    stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1031
    p += stack_base;
1032

    
1033
    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1034
        if (bprm->page[i]) {
1035
            info->rss++;
1036
            /* FIXME - check return value of memcpy_to_target() for failure */
1037
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1038
            free(bprm->page[i]);
1039
        }
1040
        stack_base += TARGET_PAGE_SIZE;
1041
    }
1042
    return p;
1043
}
1044

    
1045
/* Map and zero the bss.  We need to explicitly zero any fractional pages
1046
   after the data section (i.e. bss).  */
1047
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1048
{
1049
    uintptr_t host_start, host_map_start, host_end;
1050

    
1051
    last_bss = TARGET_PAGE_ALIGN(last_bss);
1052

    
1053
    /* ??? There is confusion between qemu_real_host_page_size and
1054
       qemu_host_page_size here and elsewhere in target_mmap, which
1055
       may lead to the end of the data section mapping from the file
1056
       not being mapped.  At least there was an explicit test and
1057
       comment for that here, suggesting that "the file size must
1058
       be known".  The comment probably pre-dates the introduction
1059
       of the fstat system call in target_mmap which does in fact
1060
       find out the size.  What isn't clear is if the workaround
1061
       here is still actually needed.  For now, continue with it,
1062
       but merge it with the "normal" mmap that would allocate the bss.  */
1063

    
1064
    host_start = (uintptr_t) g2h(elf_bss);
1065
    host_end = (uintptr_t) g2h(last_bss);
1066
    host_map_start = (host_start + qemu_real_host_page_size - 1);
1067
    host_map_start &= -qemu_real_host_page_size;
1068

    
1069
    if (host_map_start < host_end) {
1070
        void *p = mmap((void *)host_map_start, host_end - host_map_start,
1071
                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1072
        if (p == MAP_FAILED) {
1073
            perror("cannot mmap brk");
1074
            exit(-1);
1075
        }
1076

    
1077
        /* Since we didn't use target_mmap, make sure to record
1078
           the validity of the pages with qemu.  */
1079
        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
1080
    }
1081

    
1082
    if (host_start < host_map_start) {
1083
        memset((void *)host_start, 0, host_map_start - host_start);
1084
    }
1085
}
1086

    
1087
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1088
                                   struct elfhdr * exec,
1089
                                   abi_ulong load_addr,
1090
                                   abi_ulong load_bias,
1091
                                   abi_ulong interp_load_addr, int ibcs,
1092
                                   struct image_info *info)
1093
{
1094
    abi_ulong sp;
1095
    int size;
1096
    abi_ulong u_platform;
1097
    const char *k_platform;
1098
    const int n = sizeof(elf_addr_t);
1099

    
1100
    sp = p;
1101
    u_platform = 0;
1102
    k_platform = ELF_PLATFORM;
1103
    if (k_platform) {
1104
        size_t len = strlen(k_platform) + 1;
1105
        sp -= (len + n - 1) & ~(n - 1);
1106
        u_platform = sp;
1107
        /* FIXME - check return value of memcpy_to_target() for failure */
1108
        memcpy_to_target(sp, k_platform, len);
1109
    }
1110
    /*
1111
     * Force 16 byte _final_ alignment here for generality.
1112
     */
1113
    sp = sp &~ (abi_ulong)15;
1114
    size = (DLINFO_ITEMS + 1) * 2;
1115
    if (k_platform)
1116
        size += 2;
1117
#ifdef DLINFO_ARCH_ITEMS
1118
    size += DLINFO_ARCH_ITEMS * 2;
1119
#endif
1120
    size += envc + argc + 2;
1121
    size += (!ibcs ? 3 : 1);    /* argc itself */
1122
    size *= n;
1123
    if (size & 15)
1124
        sp -= 16 - (size & 15);
1125

    
1126
    /* This is correct because Linux defines
1127
     * elf_addr_t as Elf32_Off / Elf64_Off
1128
     */
1129
#define NEW_AUX_ENT(id, val) do {               \
1130
        sp -= n; put_user_ual(val, sp);         \
1131
        sp -= n; put_user_ual(id, sp);          \
1132
    } while(0)
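/* The stack grows downwards, so pushing the value before the id leaves each
   auxv entry laid out in memory as (id, value), as the ABI expects. */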
1133

    
1134
    NEW_AUX_ENT (AT_NULL, 0);
1135

    
1136
    /* There must be exactly DLINFO_ITEMS entries here.  */
1137
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(load_addr + exec->e_phoff));
1138
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1139
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1140
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1141
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_load_addr));
1142
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1143
    NEW_AUX_ENT(AT_ENTRY, load_bias + exec->e_entry);
1144
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1145
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1146
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1147
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1148
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1149
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1150
    if (k_platform)
1151
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
1152
#ifdef ARCH_DLINFO
1153
    /*
1154
     * ARCH_DLINFO must come last so platform specific code can enforce
1155
     * special alignment requirements on the AUXV if necessary (eg. PPC).
1156
     */
1157
    ARCH_DLINFO;
1158
#endif
1159
#undef NEW_AUX_ENT
1160

    
1161
    info->saved_auxv = sp;
1162

    
1163
    sp = loader_build_argptr(envc, argc, sp, p, !ibcs);
1164
    return sp;
1165
}
1166

    
1167

    
1168
static abi_ulong load_elf_interp(struct elfhdr * interp_elf_ex,
1169
                                 int interpreter_fd,
1170
                                 abi_ulong *interp_load_addr,
1171
                                 char bprm_buf[BPRM_BUF_SIZE])
1172
{
1173
    struct elf_phdr *elf_phdata  =  NULL;
1174
    abi_ulong load_addr, load_bias, loaddr, hiaddr;
1175
    int retval;
1176
    abi_ulong error;
1177
    int i;
1178

    
1179
    bswap_ehdr(interp_elf_ex);
1180
    if (!elf_check_ehdr(interp_elf_ex)) {
1181
        return ~((abi_ulong)0UL);
1182
    }
1183

    
1184
    /* Now read in all of the header information */
1185
    elf_phdata =  (struct elf_phdr *)
1186
        malloc(sizeof(struct elf_phdr) * interp_elf_ex->e_phnum);
1187
    if (!elf_phdata)
1188
        return ~((abi_ulong)0UL);
1189

    
1190
    i = interp_elf_ex->e_phnum * sizeof(struct elf_phdr);
1191
    if (interp_elf_ex->e_phoff + i <= BPRM_BUF_SIZE) {
1192
        memcpy(elf_phdata, bprm_buf + interp_elf_ex->e_phoff, i);
1193
    } else {
1194
        retval = pread(interpreter_fd, elf_phdata, i, interp_elf_ex->e_phoff);
1195
        if (retval != i) {
1196
            perror("load_elf_interp");
1197
            exit(-1);
1198
        }
1199
    }
1200
    bswap_phdr(elf_phdata, interp_elf_ex->e_phnum);
1201

    
1202
    /* Find the maximum size of the image and allocate an appropriate
1203
       amount of memory to handle that.  */
1204
    loaddr = -1, hiaddr = 0;
1205
    for (i = 0; i < interp_elf_ex->e_phnum; ++i) {
1206
        if (elf_phdata[i].p_type == PT_LOAD) {
1207
            abi_ulong a = elf_phdata[i].p_vaddr;
1208
            if (a < loaddr) {
1209
                loaddr = a;
1210
            }
1211
            a += elf_phdata[i].p_memsz;
1212
            if (a > hiaddr) {
1213
                hiaddr = a;
1214
            }
1215
        }
1216
    }
1217

    
1218
    load_addr = loaddr;
1219
    if (interp_elf_ex->e_type == ET_DYN) {
1220
        /* The image indicates that it can be loaded anywhere.  Find a
1221
           location that can hold the memory space required.  If the
1222
           image is pre-linked, LOADDR will be non-zero.  Since we do
1223
           not supply MAP_FIXED here we'll use that address if and
1224
           only if it remains available.  */
1225
        load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1226
                                MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1227
                                -1, 0);
1228
        if (load_addr == -1) {
1229
            perror("mmap");
1230
            exit(-1);
1231
        }
1232
    }
1233
    load_bias = load_addr - loaddr;
1234

    
1235
    for (i = 0; i < interp_elf_ex->e_phnum; i++) {
1236
        struct elf_phdr *eppnt = elf_phdata + i;
1237
        if (eppnt->p_type == PT_LOAD) {
1238
            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1239
            int elf_prot = 0;
1240

    
1241
            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
1242
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1243
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1244

    
1245
            vaddr = load_bias + eppnt->p_vaddr;
1246
            vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1247
            vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
1248

    
1249
            error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1250
                                elf_prot, MAP_PRIVATE | MAP_FIXED,
1251
                                interpreter_fd, eppnt->p_offset - vaddr_po);
1252
            if (error == -1) {
1253
                /* Real error */
1254
                close(interpreter_fd);
1255
                free(elf_phdata);
1256
                return ~((abi_ulong)0UL);
1257
            }
1258

    
1259
            vaddr_ef = vaddr + eppnt->p_filesz;
1260
            vaddr_em = vaddr + eppnt->p_memsz;
1261

    
1262
            /* If the load segment requests extra zeros (e.g. bss), map it.  */
1263
            if (vaddr_ef < vaddr_em) {
1264
                zero_bss(vaddr_ef, vaddr_em, elf_prot);
1265
            }
1266
        }
1267
    }
1268

    
1269
    if (qemu_log_enabled()) {
1270
        load_symbols(interp_elf_ex, interpreter_fd, load_bias);
1271
    }
1272

    
1273
    close(interpreter_fd);
1274
    free(elf_phdata);
1275

    
1276
    *interp_load_addr = load_addr;
1277
    return ((abi_ulong) interp_elf_ex->e_entry) + load_bias;
1278
}
1279

    
1280
static int symfind(const void *s0, const void *s1)
1281
{
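    /* bsearch() comparator: the key carries only an address (st_value),
       which matches a symbol when it falls within
       [st_value, st_value + st_size). */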
1282
    struct elf_sym *key = (struct elf_sym *)s0;
1283
    struct elf_sym *sym = (struct elf_sym *)s1;
1284
    int result = 0;
1285
    if (key->st_value < sym->st_value) {
1286
        result = -1;
1287
    } else if (key->st_value >= sym->st_value + sym->st_size) {
1288
        result = 1;
1289
    }
1290
    return result;
1291
}
1292

    
1293
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1294
{
1295
#if ELF_CLASS == ELFCLASS32
1296
    struct elf_sym *syms = s->disas_symtab.elf32;
1297
#else
1298
    struct elf_sym *syms = s->disas_symtab.elf64;
1299
#endif
1300

    
1301
    // binary search
1302
    struct elf_sym key;
1303
    struct elf_sym *sym;
1304

    
1305
    key.st_value = orig_addr;
1306

    
1307
    sym = bsearch(&key, syms, s->disas_num_syms, sizeof(*syms), symfind);
1308
    if (sym != NULL) {
1309
        return s->disas_strtab + sym->st_name;
1310
    }
1311

    
1312
    return "";
1313
}
1314

    
1315
/* FIXME: This should use elf_ops.h  */
1316
static int symcmp(const void *s0, const void *s1)
1317
{
1318
    struct elf_sym *sym0 = (struct elf_sym *)s0;
1319
    struct elf_sym *sym1 = (struct elf_sym *)s1;
1320
    return (sym0->st_value < sym1->st_value)
1321
        ? -1
1322
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1323
}
1324

    
1325
/* Best attempt to load symbols from this ELF object. */
1326
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
1327
{
1328
    int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
1329
    struct elf_shdr *shdr;
1330
    char *strings;
1331
    struct syminfo *s;
1332
    struct elf_sym *syms;
1333

    
1334
    shnum = hdr->e_shnum;
1335
    i = shnum * sizeof(struct elf_shdr);
1336
    shdr = (struct elf_shdr *)alloca(i);
1337
    if (pread(fd, shdr, i, hdr->e_shoff) != i) {
1338
        return;
1339
    }
1340

    
1341
    bswap_shdr(shdr, shnum);
1342
    for (i = 0; i < shnum; ++i) {
1343
        if (shdr[i].sh_type == SHT_SYMTAB) {
1344
            sym_idx = i;
1345
            str_idx = shdr[i].sh_link;
1346
            goto found;
1347
        }
1348
    }
1349

    
1350
    /* There will be no symbol table if the file was stripped.  */
1351
    return;
1352

    
1353
 found:
1354
    /* Now we know where the strtab and symtab are.  Snarf them.  */
1355
    s = malloc(sizeof(*s));
1356
    if (!s) {
1357
        return;
1358
    }
1359

    
1360
    i = shdr[str_idx].sh_size;
1361
    s->disas_strtab = strings = malloc(i);
1362
    if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
1363
        free(s);
1364
        free(strings);
1365
        return;
1366
    }
1367

    
1368
    i = shdr[sym_idx].sh_size;
1369
    syms = malloc(i);
1370
    if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
1371
        free(s);
1372
        free(strings);
1373
        free(syms);
1374
        return;
1375
    }
1376

    
1377
    nsyms = i / sizeof(struct elf_sym);
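    /* Compact the table in place: a discarded entry is overwritten by the
       current last element and nsyms shrinks, so i advances only when an
       entry is kept. */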
1378
    for (i = 0; i < nsyms; ) {
1379
        bswap_sym(syms + i);
1380
        /* Throw away entries which we do not need.  */
1381
        if (syms[i].st_shndx == SHN_UNDEF
1382
            || syms[i].st_shndx >= SHN_LORESERVE
1383
            || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1384
            if (i < --nsyms) {
1385
                syms[i] = syms[nsyms];
1386
            }
1387
        } else {
1388
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
1389
            /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
1390
            syms[i].st_value &= ~(target_ulong)1;
1391
#endif
1392
            syms[i].st_value += load_bias;
1393
            i++;
1394
        }
1395
    }
1396

    
1397
    syms = realloc(syms, nsyms * sizeof(*syms));
1398
    qsort(syms, nsyms, sizeof(*syms), symcmp);
1399

    
1400
    s->disas_num_syms = nsyms;
1401
#if ELF_CLASS == ELFCLASS32
1402
    s->disas_symtab.elf32 = syms;
1403
#else
1404
    s->disas_symtab.elf64 = syms;
1405
#endif
1406
    s->lookup_symbol = lookup_symbolxx;
1407
    s->next = syminfos;
1408
    syminfos = s;
1409
}
1410

    
1411
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1412
                    struct image_info * info)
1413
{
1414
    struct elfhdr elf_ex;
1415
    struct elfhdr interp_elf_ex;
1416
    struct exec interp_ex;
1417
    int interpreter_fd = -1; /* avoid warning */
1418
    abi_ulong load_addr, load_bias;
1419
    int load_addr_set = 0;
1420
    unsigned int interpreter_type = INTERPRETER_NONE;
1421
    unsigned char ibcs2_interpreter;
1422
    int i;
1423
    abi_ulong mapped_addr;
1424
    struct elf_phdr * elf_ppnt;
1425
    struct elf_phdr *elf_phdata;
1426
    abi_ulong k, elf_brk;
1427
    int retval;
1428
    char * elf_interpreter;
1429
    abi_ulong elf_entry, interp_load_addr = 0;
1430
    int status;
1431
    abi_ulong start_code, end_code, start_data, end_data;
1432
    abi_ulong reloc_func_desc = 0;
1433
    abi_ulong elf_stack;
1434
    char passed_fileno[6];
1435

    
1436
    ibcs2_interpreter = 0;
1437
    status = 0;
1438
    load_addr = 0;
1439
    load_bias = 0;
1440
    elf_ex = *((struct elfhdr *) bprm->buf);          /* exec-header */
1441

    
1442
    /* First of all, some simple consistency checks */
1443
    if (!elf_check_ident(&elf_ex)) {
1444
        return -ENOEXEC;
1445
    }
1446
    bswap_ehdr(&elf_ex);
1447
    if (!elf_check_ehdr(&elf_ex)) {
1448
        return -ENOEXEC;
1449
    }
1450

    
1451
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
1452
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
1453
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
1454
    if (!bprm->p) {
1455
        retval = -E2BIG;
1456
    }
1457

    
1458
    /* Now read in all of the header information */
1459
    elf_phdata = (struct elf_phdr *)
1460
        malloc(elf_ex.e_phnum * sizeof(struct elf_phdr));
1461
    if (elf_phdata == NULL) {
1462
        return -ENOMEM;
1463
    }
1464

    
1465
    i = elf_ex.e_phnum * sizeof(struct elf_phdr);
1466
    if (elf_ex.e_phoff + i <= BPRM_BUF_SIZE) {
1467
        memcpy(elf_phdata, bprm->buf + elf_ex.e_phoff, i);
1468
    } else {
1469
        retval = pread(bprm->fd, (char *) elf_phdata, i, elf_ex.e_phoff);
1470
        if (retval != i) {
1471
            perror("load_elf_binary");
1472
            exit(-1);
1473
        }
1474
    }
1475
    bswap_phdr(elf_phdata, elf_ex.e_phnum);
1476

    
1477
    elf_brk = 0;
1478
    elf_stack = ~((abi_ulong)0UL);
1479
    elf_interpreter = NULL;
1480
    start_code = ~((abi_ulong)0UL);
1481
    end_code = 0;
1482
    start_data = 0;
1483
    end_data = 0;
1484
    interp_ex.a_info = 0;
1485

    
1486
    elf_ppnt = elf_phdata;
1487
    for (i = 0; i < elf_ex.e_phnum; i++) {
1488
        if (elf_ppnt->p_type == PT_INTERP) {
1489
            if ( elf_interpreter != NULL )
1490
            {
1491
                free (elf_phdata);
1492
                free(elf_interpreter);
1493
                close(bprm->fd);
1494
                return -EINVAL;
1495
            }
1496

    
1497
            /* This is the program interpreter used for
1498
             * shared libraries - for now assume that this
1499
             * is an a.out format binary
1500
             */
1501

    
1502
            elf_interpreter = (char *)malloc(elf_ppnt->p_filesz);
1503

    
1504
            if (elf_interpreter == NULL) {
1505
                free (elf_phdata);
1506
                close(bprm->fd);
1507
                return -ENOMEM;
1508
            }
1509

    
1510
            if (elf_ppnt->p_offset + elf_ppnt->p_filesz <= BPRM_BUF_SIZE) {
1511
                memcpy(elf_interpreter, bprm->buf + elf_ppnt->p_offset,
1512
                       elf_ppnt->p_filesz);
1513
            } else {
1514
                retval = pread(bprm->fd, elf_interpreter, elf_ppnt->p_filesz,
1515
                               elf_ppnt->p_offset);
1516
                if (retval != elf_ppnt->p_filesz) {
1517
                    perror("load_elf_binary2");
1518
                    exit(-1);
1519
                }
1520
            }
1521

    
1522
            /* If the program interpreter is one of these two,
1523
               then assume an iBCS2 image. Otherwise assume
1524
               a native linux image. */
1525

    
1526
            /* JRP - Need to add X86 lib dir stuff here... */
1527

    
1528
            if (strcmp(elf_interpreter,"/usr/lib/libc.so.1") == 0 ||
1529
                strcmp(elf_interpreter,"/usr/lib/ld.so.1") == 0) {
1530
                ibcs2_interpreter = 1;
1531
            }
1532

    
1533
            retval = open(path(elf_interpreter), O_RDONLY);
1534
            if (retval < 0) {
1535
                perror(elf_interpreter);
1536
                exit(-1);
1537
            }
1538
            interpreter_fd = retval;
1539

    
1540
            retval = read(interpreter_fd, bprm->buf, BPRM_BUF_SIZE);
1541
            if (retval < 0) {
1542
                perror("load_elf_binary3");
1543
                exit(-1);
1544
            }
1545
            if (retval < BPRM_BUF_SIZE) {
1546
                memset(bprm->buf, 0, BPRM_BUF_SIZE - retval);
1547
            }
1548

    
1549
            interp_ex = *((struct exec *) bprm->buf); /* aout exec-header */
1550
            interp_elf_ex = *((struct elfhdr *) bprm->buf); /* elf exec-header */
1551
        }
1552
        elf_ppnt++;
1553
    }
1554

    
1555
    /* Some simple consistency checks for the interpreter */
1556
    if (elf_interpreter){
1557
        interpreter_type = INTERPRETER_ELF | INTERPRETER_AOUT;
1558

    
1559
        /* Now figure out which format our binary is */
1560
        if ((N_MAGIC(interp_ex) != OMAGIC) && (N_MAGIC(interp_ex) != ZMAGIC) &&
1561
            (N_MAGIC(interp_ex) != QMAGIC)) {
1562
            interpreter_type = INTERPRETER_ELF;
1563
        }
1564

    
1565
        if (!elf_check_ident(&interp_elf_ex)) {
1566
            interpreter_type &= ~INTERPRETER_ELF;
1567
        }
1568

    
1569
        if (!interpreter_type) {
1570
            free(elf_interpreter);
1571
            free(elf_phdata);
1572
            close(bprm->fd);
1573
            return -ELIBBAD;
1574
        }
1575
    }
1576

    
1577
    /* OK, we are done with that, now set up the arg stuff,
1578
       and then start this sucker up */
1579

    
1580
    {
1581
        char * passed_p;
1582

    
1583
        if (interpreter_type == INTERPRETER_AOUT) {
1584
            snprintf(passed_fileno, sizeof(passed_fileno), "%d", bprm->fd);
1585
            passed_p = passed_fileno;
1586

    
1587
            if (elf_interpreter) {
1588
                bprm->p = copy_elf_strings(1, &passed_p, bprm->page, bprm->p);
1589
                bprm->argc++;
1590
            }
1591
        }
1592
        if (!bprm->p) {
1593
            if (elf_interpreter) {
1594
                free(elf_interpreter);
1595
            }
1596
            free (elf_phdata);
1597
            close(bprm->fd);
1598
            return -E2BIG;
1599
        }
1600
    }
1601

    
1602
    /* OK, This is the point of no return */
1603
    info->end_data = 0;
1604
    info->end_code = 0;
1605
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
1606
    info->mmap = 0;
1607
    elf_entry = (abi_ulong) elf_ex.e_entry;
1608

    
1609
#if defined(CONFIG_USE_GUEST_BASE)
    /*
     * If the user has not explicitly set guest_base, probe here whether
     * we should set it automatically.
     */
    if (!(have_guest_base || reserved_va)) {
        /*
         * Go through the ELF program header table and find the address
         * range used by loadable segments.  Check that this is available on
         * the host, and if not find a suitable value for guest_base.  */
        abi_ulong app_start = ~0;
        abi_ulong app_end = 0;
        abi_ulong addr;
        unsigned long host_start;
        unsigned long real_start;
        unsigned long host_size;
        for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum;
             i++, elf_ppnt++) {
            if (elf_ppnt->p_type != PT_LOAD)
                continue;
            addr = elf_ppnt->p_vaddr;
            if (addr < app_start) {
                app_start = addr;
            }
            addr += elf_ppnt->p_memsz;
            if (addr > app_end) {
                app_end = addr;
            }
        }

        /* If we don't have any loadable segments then something
           is very wrong.  */
        assert(app_start < app_end);

        /* Round addresses to page boundaries.  */
        app_start = app_start & qemu_host_page_mask;
        app_end = HOST_PAGE_ALIGN(app_end);
        if (app_start < mmap_min_addr) {
            host_start = HOST_PAGE_ALIGN(mmap_min_addr);
        } else {
            host_start = app_start;
            if (host_start != app_start) {
                fprintf(stderr, "qemu: Address overflow loading ELF binary\n");
                abort();
            }
        }
        host_size = app_end - app_start;
        while (1) {
            /* Do not use mmap_find_vma here because that is limited to the
               guest address space.  We are going to make the
               guest address space fit whatever we're given.  */
            real_start = (unsigned long)mmap((void *)host_start, host_size,
                PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
            if (real_start == (unsigned long)-1) {
                fprintf(stderr, "qemu: Virtual memory exhausted\n");
                abort();
            }
            if (real_start == host_start) {
                break;
            }
            /* That address didn't work.  Unmap and try a different one.
               The address the host picked is typically right at the top
               of the host address space and leaves the guest with no
               usable address space.  Resort to a linear search.  We
               already compensated for mmap_min_addr, so this should not
               happen often.  It probably means we got unlucky and host
               address space randomization put a shared library somewhere
               inconvenient.  */
            munmap((void *)real_start, host_size);
            host_start += qemu_host_page_size;
            if (host_start == app_start) {
                /* Theoretically possible if the host doesn't have any
                   suitably aligned areas.  Normally the first mmap will
                   fail.  */
                fprintf(stderr, "qemu: Unable to find space for application\n");
                abort();
            }
        }
        qemu_log("Relocating guest address space from 0x" TARGET_ABI_FMT_lx
                 " to 0x%lx\n", app_start, real_start);
        guest_base = real_start - app_start;
    }
#endif /* CONFIG_USE_GUEST_BASE */

    /* Do this so that we can load the interpreter, if need be.  We will
       change some of these later */
    info->rss = 0;
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
    info->start_stack = bprm->p;

    /* Now we do a little grungy work by mmaping the ELF image into
     * the correct location in memory.  At this point, we assume that
     * the image should be loaded at a fixed address, not at a variable
     * address.
     */

    for (i = 0, elf_ppnt = elf_phdata; i < elf_ex.e_phnum; i++, elf_ppnt++) {
        int elf_prot = 0;
        int elf_flags = 0;
        abi_ulong error;

        if (elf_ppnt->p_type != PT_LOAD)
            continue;

        if (elf_ppnt->p_flags & PF_R) elf_prot |= PROT_READ;
        if (elf_ppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
        if (elf_ppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
        elf_flags = MAP_PRIVATE | MAP_DENYWRITE;
        if (elf_ex.e_type == ET_EXEC || load_addr_set) {
            elf_flags |= MAP_FIXED;
        } else if (elf_ex.e_type == ET_DYN) {
            /* Try and get dynamic programs out of the way of the default mmap
               base, as well as whatever program they might try to exec.  This
               is because the brk will follow the loader, and is not movable.  */
            /* NOTE: for qemu, we do a big mmap to get enough space
               without hardcoding any address */
            error = target_mmap(0, ET_DYN_MAP_SIZE,
                                PROT_NONE, MAP_PRIVATE | MAP_ANON,
                                -1, 0);
            if (error == -1) {
                perror("mmap");
                exit(-1);
            }
            load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
        }

        error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
                            (elf_ppnt->p_filesz +
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
                            elf_prot,
                            (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
                            bprm->fd,
                            (elf_ppnt->p_offset -
                             TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
        if (error == -1) {
            perror("mmap");
            exit(-1);
        }

#ifdef LOW_ELF_STACK
        if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
            elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
#endif

        if (!load_addr_set) {
            load_addr_set = 1;
            load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
            if (elf_ex.e_type == ET_DYN) {
                load_bias += error -
                    TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
                load_addr += load_bias;
                reloc_func_desc = load_bias;
            }
        }
        k = elf_ppnt->p_vaddr;
        if (k < start_code)
            start_code = k;
        if (start_data < k)
            start_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;
        if ((elf_ppnt->p_flags & PF_X) && end_code < k)
            end_code = k;
        if (end_data < k)
            end_data = k;
        k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
        if (k > elf_brk) {
            elf_brk = TARGET_PAGE_ALIGN(k);
        }

        /* If the load segment requests extra zeros (e.g. bss), map it.  */
        if (elf_ppnt->p_filesz < elf_ppnt->p_memsz) {
            abi_ulong base = load_bias + elf_ppnt->p_vaddr;
            zero_bss(base + elf_ppnt->p_filesz,
                     base + elf_ppnt->p_memsz, elf_prot);
        }
    }

    elf_entry += load_bias;
    elf_brk += load_bias;
    start_code += load_bias;
    end_code += load_bias;
    start_data += load_bias;
    end_data += load_bias;

    if (elf_interpreter) {
        if (interpreter_type & 1) {
            elf_entry = load_aout_interp(&interp_ex, interpreter_fd);
        } else if (interpreter_type & 2) {
            elf_entry = load_elf_interp(&interp_elf_ex, interpreter_fd,
                                        &interp_load_addr, bprm->buf);
        }
        reloc_func_desc = interp_load_addr;

        close(interpreter_fd);
        free(elf_interpreter);

        if (elf_entry == ~((abi_ulong)0UL)) {
            printf("Unable to load interpreter\n");
            free(elf_phdata);
            exit(-1);
            return 0;
        }
    }

    free(elf_phdata);

    if (qemu_log_enabled()) {
        load_symbols(&elf_ex, bprm->fd, load_bias);
    }

    if (interpreter_type != INTERPRETER_AOUT) {
        close(bprm->fd);
    }
    info->personality = (ibcs2_interpreter ? PER_SVR4 : PER_LINUX);

#ifdef LOW_ELF_STACK
    info->start_stack = bprm->p = elf_stack - 4;
#endif
    bprm->p = create_elf_tables(bprm->p,
                                bprm->argc,
                                bprm->envc,
                                &elf_ex,
                                load_addr, load_bias,
                                interp_load_addr,
                                (interpreter_type == INTERPRETER_AOUT ? 0 : 1),
                                info);
    info->load_addr = reloc_func_desc;
    info->start_brk = info->brk = elf_brk;
    info->end_code = end_code;
    info->start_code = start_code;
    info->start_data = start_data;
    info->end_data = end_data;
    info->start_stack = bprm->p;

#if 0
    printf("(start_brk) %x\n", info->start_brk);
    printf("(end_code) %x\n", info->end_code);
    printf("(start_code) %x\n", info->start_code);
    printf("(end_data) %x\n", info->end_data);
    printf("(start_stack) %x\n", info->start_stack);
    printf("(brk) %x\n", info->brk);
#endif

    if (info->personality == PER_SVR4) {
        /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
           and some applications "depend" upon this behavior.
           Since we do not have the power to recompile these, we
           emulate the SVr4 behavior.  Sigh.  */
        mapped_addr = target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
                                  MAP_FIXED | MAP_PRIVATE, -1, 0);
    }

    info->entry = elf_entry;

#ifdef USE_ELF_CORE_DUMP
    bprm->core_dump = &elf_core_dump;
#endif

    return 0;
}

#ifdef USE_ELF_CORE_DUMP
/*
 * Definitions to generate Intel SVR4-like core files.
 * These mostly have the same names as the SVR4 types with "target_elf_"
 * tacked on the front to prevent clashes with linux definitions,
 * and the typedef forms have been avoided.  This is mostly like
 * the SVR4 structure, but more Linuxy, with things that Linux does
 * not support and which gdb doesn't really use excluded.
 *
 * Fields we don't dump (their contents are zero) in linux-user qemu
 * are marked with XXX.
 *
 * Core dump code is copied from the Linux kernel (fs/binfmt_elf.c).
 *
 * Porting ELF coredump to a new target is a (quite) simple process.  First
 * you define USE_ELF_CORE_DUMP in the target's ELF code (where init_thread()
 * for the target resides):
 *
 * #define USE_ELF_CORE_DUMP
 *
 * Next you define the type of the register set used for dumping.  The ELF
 * specification says that it needs to be an array of elf_greg_t with
 * ELF_NREG elements.
 *
 * typedef <target_regtype> target_elf_greg_t;
 * #define ELF_NREG <number of registers>
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
 *
 * The last step is to implement a target-specific function that copies the
 * registers from the given CPU into the register set just defined.  The
 * prototype is:
 *
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
 *                                const CPUState *env);
 *
 * Parameters:
 *     regs - copy register values into here (allocated and zeroed by caller)
 *     env - copy registers from here
 *
 * An example for the ARM target is provided in this file.
 */

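/*
 * Purely illustrative sketch (not compiled): for a hypothetical target
 * whose CPUState keeps its general-purpose registers in an array
 * "env->regs[]" with ELF_NREG entries, the copy function from the last
 * step above might look roughly like the following.  The field name
 * "regs" and the use of tswapl() here are assumptions for the sake of
 * the example; a real port must use the target's actual CPUState layout
 * (see the ARM example elsewhere in this file).
 */
#if 0
static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUState *env)
{
    int i;

    /* Store each register in target byte order, as expected by gdb.  */
    for (i = 0; i < ELF_NREG; i++) {
        (*regs)[i] = tswapl(env->regs[i]);   /* hypothetical layout */
    }
}
#endif
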
/* An ELF note in memory */
struct memelfnote {
    const char *name;
    size_t     namesz;
    size_t     namesz_rounded;
    int        type;
    size_t     datasz;
    void       *data;
    size_t     notesz;
};

struct target_elf_siginfo {
    int  si_signo; /* signal number */
    int  si_code;  /* extra code */
    int  si_errno; /* errno */
};

struct target_elf_prstatus {
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
    short              pr_cursig;    /* Current signal */
    target_ulong       pr_sigpend;   /* XXX */
    target_ulong       pr_sighold;   /* XXX */
    target_pid_t       pr_pid;
    target_pid_t       pr_ppid;
    target_pid_t       pr_pgrp;
    target_pid_t       pr_sid;
    struct target_timeval pr_utime;  /* XXX User time */
    struct target_timeval pr_stime;  /* XXX System time */
    struct target_timeval pr_cutime; /* XXX Cumulative user time */
    struct target_timeval pr_cstime; /* XXX Cumulative system time */
    target_elf_gregset_t      pr_reg;       /* GP registers */
    int                pr_fpvalid;   /* XXX */
};

#define ELF_PRARGSZ     (80) /* Number of chars for args */

struct target_elf_prpsinfo {
    char         pr_state;       /* numeric process state */
    char         pr_sname;       /* char for pr_state */
    char         pr_zomb;        /* zombie */
    char         pr_nice;        /* nice val */
    target_ulong pr_flag;        /* flags */
    target_uid_t pr_uid;
    target_gid_t pr_gid;
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
    /* Lots missing */
    char    pr_fname[16];           /* filename of executable */
    char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
};

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status {
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
#if 0
    elf_fpregset_t fpu;             /* NT_PRFPREG */
    struct task_struct *thread;
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
#endif
    struct memelfnote notes[1];
    int num_notes;
};

struct elf_note_info {
    struct memelfnote   *notes;
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */

    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
#if 0
    /*
     * Current version of ELF coredump doesn't support
     * dumping fp regs etc.
     */
    elf_fpregset_t *fpu;
    elf_fpxregset_t *xfpu;
    int thread_status_size;
#endif
    int notes_size;
    int numnote;
};

struct vm_area_struct {
    abi_ulong   vma_start;  /* start vaddr of memory region */
    abi_ulong   vma_end;    /* end vaddr of memory region */
    abi_ulong   vma_flags;  /* protection etc. flags for the region */
    QTAILQ_ENTRY(vm_area_struct) vma_link;
};

struct mm_struct {
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
    int mm_count;           /* number of mappings */
};

static struct mm_struct *vma_init(void);
static void vma_delete(struct mm_struct *);
static int vma_add_mapping(struct mm_struct *, abi_ulong,
                           abi_ulong, abi_ulong);
static int vma_get_mapping_count(const struct mm_struct *);
static struct vm_area_struct *vma_first(const struct mm_struct *);
static struct vm_area_struct *vma_next(struct vm_area_struct *);
static abi_ulong vma_dump_size(const struct vm_area_struct *);
static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long flags);

static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
static void fill_note(struct memelfnote *, const char *, int,
                      unsigned int, void *);
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
static void fill_auxv_note(struct memelfnote *, const TaskState *);
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
static size_t note_size(const struct memelfnote *);
static void free_note_info(struct elf_note_info *);
static int fill_note_info(struct elf_note_info *, long, const CPUState *);
static void fill_thread_info(struct elf_note_info *, const CPUState *);
static int core_dump_filename(const TaskState *, char *, size_t);

static int dump_write(int, const void *, size_t);
static int write_note(struct memelfnote *, int);
static int write_note_info(struct elf_note_info *, int);

#ifdef BSWAP_NEEDED
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
{
    prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
    prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
    prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
    prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
    prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
    /* cpu times are not filled, so we skip them */
    /* regs should be in correct format already */
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
}

static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
{
    psinfo->pr_flag = tswapl(psinfo->pr_flag);
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
}

static void bswap_note(struct elf_note *en)
{
    bswap32s(&en->n_namesz);
    bswap32s(&en->n_descsz);
    bswap32s(&en->n_type);
}
#else
static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
static inline void bswap_psinfo(struct target_elf_prpsinfo *p) { }
static inline void bswap_note(struct elf_note *en) { }
#endif /* BSWAP_NEEDED */

/*
 * Minimal support for linux memory regions.  These are needed
 * when we are finding out exactly what memory belongs to the
 * emulated process.  No locks are needed here, as long as the
 * thread that received the signal is stopped.
 */

static struct mm_struct *vma_init(void)
{
    struct mm_struct *mm;

    if ((mm = qemu_malloc(sizeof (*mm))) == NULL)
        return (NULL);

    mm->mm_count = 0;
    QTAILQ_INIT(&mm->mm_mmap);

    return (mm);
}

static void vma_delete(struct mm_struct *mm)
{
    struct vm_area_struct *vma;

    while ((vma = vma_first(mm)) != NULL) {
        QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
        qemu_free(vma);
    }
    qemu_free(mm);
}

static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
                           abi_ulong end, abi_ulong flags)
{
    struct vm_area_struct *vma;

    if ((vma = qemu_mallocz(sizeof (*vma))) == NULL)
        return (-1);

    vma->vma_start = start;
    vma->vma_end = end;
    vma->vma_flags = flags;

    QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
    mm->mm_count++;

    return (0);
}

static struct vm_area_struct *vma_first(const struct mm_struct *mm)
{
    return (QTAILQ_FIRST(&mm->mm_mmap));
}

static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
{
    return (QTAILQ_NEXT(vma, vma_link));
}

static int vma_get_mapping_count(const struct mm_struct *mm)
{
    return (mm->mm_count);
}

/*
 * Calculate the file (dump) size of the given memory region.
 */
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
{
    /* if we cannot even read the first page, skip it */
    if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
        return (0);

    /*
     * Usually we don't dump executable pages as they contain
     * non-writable code that the debugger can read directly from
     * the target library etc.  However, thread stacks are marked
     * executable as well, so we read in the first page of the given
     * region and check whether it contains an ELF header.  If there
     * is no ELF header, we dump the region.
     */
    if (vma->vma_flags & PROT_EXEC) {
        char page[TARGET_PAGE_SIZE];

        copy_from_user(page, vma->vma_start, sizeof (page));
        if ((page[EI_MAG0] == ELFMAG0) &&
            (page[EI_MAG1] == ELFMAG1) &&
            (page[EI_MAG2] == ELFMAG2) &&
            (page[EI_MAG3] == ELFMAG3)) {
            /*
             * Mappings are possibly from an ELF binary.  Don't dump
             * them.
             */
            return (0);
        }
    }

    return (vma->vma_end - vma->vma_start);
}

static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
                      unsigned long flags)
{
    struct mm_struct *mm = (struct mm_struct *)priv;

    vma_add_mapping(mm, start, end, flags);
    return (0);
}

static void fill_note(struct memelfnote *note, const char *name, int type,
                      unsigned int sz, void *data)
{
    unsigned int namesz;

    namesz = strlen(name) + 1;
    note->name = name;
    note->namesz = namesz;
    note->namesz_rounded = roundup(namesz, sizeof (int32_t));
    note->type = type;
    note->datasz = roundup(sz, sizeof (int32_t));
    note->data = data;

    /*
     * We calculate the rounded-up note size here, as specified by
     * the ELF document.
     */
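    /*
     * For example, for the name "CORE" (namesz = 5, rounded up to 8)
     * and a 4-byte descriptor, notesz is 12 bytes of note header
     * (three 32-bit words) + 8 + 4 = 24 bytes.
     */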
    note->notesz = sizeof (struct elf_note) +
        note->namesz_rounded + note->datasz;
}

static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
                            uint32_t flags)
{
    (void) memset(elf, 0, sizeof(*elf));

    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
    elf->e_ident[EI_CLASS] = ELF_CLASS;
    elf->e_ident[EI_DATA] = ELF_DATA;
    elf->e_ident[EI_VERSION] = EV_CURRENT;
    elf->e_ident[EI_OSABI] = ELF_OSABI;

    elf->e_type = ET_CORE;
    elf->e_machine = machine;
    elf->e_version = EV_CURRENT;
    elf->e_phoff = sizeof(struct elfhdr);
    elf->e_flags = flags;
    elf->e_ehsize = sizeof(struct elfhdr);
    elf->e_phentsize = sizeof(struct elf_phdr);
    elf->e_phnum = segs;

    bswap_ehdr(elf);
}

static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
{
    phdr->p_type = PT_NOTE;
    phdr->p_offset = offset;
    phdr->p_vaddr = 0;
    phdr->p_paddr = 0;
    phdr->p_filesz = sz;
    phdr->p_memsz = 0;
    phdr->p_flags = 0;
    phdr->p_align = 0;

    bswap_phdr(phdr, 1);
}

static size_t note_size(const struct memelfnote *note)
{
    return (note->notesz);
}

static void fill_prstatus(struct target_elf_prstatus *prstatus,
                          const TaskState *ts, int signr)
{
    (void) memset(prstatus, 0, sizeof (*prstatus));
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
    prstatus->pr_pid = ts->ts_tid;
    prstatus->pr_ppid = getppid();
    prstatus->pr_pgrp = getpgrp();
    prstatus->pr_sid = getsid(0);

    bswap_prstatus(prstatus);
}

static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
{
    char *filename, *base_filename;
    unsigned int i, len;

    (void) memset(psinfo, 0, sizeof (*psinfo));

    len = ts->info->arg_end - ts->info->arg_start;
    if (len >= ELF_PRARGSZ)
        len = ELF_PRARGSZ - 1;
    if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
        return -EFAULT;
    for (i = 0; i < len; i++)
        if (psinfo->pr_psargs[i] == 0)
            psinfo->pr_psargs[i] = ' ';
    psinfo->pr_psargs[len] = 0;

    psinfo->pr_pid = getpid();
    psinfo->pr_ppid = getppid();
    psinfo->pr_pgrp = getpgrp();
    psinfo->pr_sid = getsid(0);
    psinfo->pr_uid = getuid();
    psinfo->pr_gid = getgid();

    filename = strdup(ts->bprm->filename);
    base_filename = strdup(basename(filename));
    (void) strncpy(psinfo->pr_fname, base_filename,
                   sizeof(psinfo->pr_fname));
    free(base_filename);
    free(filename);

    bswap_psinfo(psinfo);
    return (0);
}

static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
{
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
    elf_addr_t orig_auxv = auxv;
    abi_ulong val;
    void *ptr;
    int i, len;

    /*
     * The auxiliary vector is stored in the target process stack.  It
     * contains {type, value} pairs that we need to dump into the note.
     * This is not strictly necessary but we do it here for the sake of
     * completeness.
     */

    /* find out the length of the vector; AT_NULL is the terminator */
    i = len = 0;
    do {
        get_user_ual(val, auxv);
        i += 2;
        auxv += 2 * sizeof (elf_addr_t);
    } while (val != AT_NULL);
    len = i * sizeof (elf_addr_t);

    /* read in the whole auxv vector and copy it to the memelfnote */
    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
    if (ptr != NULL) {
        fill_note(note, "CORE", NT_AUXV, len, ptr);
        unlock_user(ptr, auxv, len);
    }
}

/*
 * Constructs the name of the coredump file.  We use the following
 * convention for the name:
 *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
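 *     e.g. qemu_foo_20110131-120000_4242.core (values are illustrative)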
 *
 * Returns 0 in case of success, -1 otherwise (errno is set).
 */
static int core_dump_filename(const TaskState *ts, char *buf,
                              size_t bufsize)
{
    char timestamp[64];
    char *filename = NULL;
    char *base_filename = NULL;
    struct timeval tv;
    struct tm tm;

    assert(bufsize >= PATH_MAX);

    if (gettimeofday(&tv, NULL) < 0) {
        (void) fprintf(stderr, "unable to get current timestamp: %s",
                       strerror(errno));
        return (-1);
    }

    filename = strdup(ts->bprm->filename);
    base_filename = strdup(basename(filename));
    (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
                    localtime_r(&tv.tv_sec, &tm));
    (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
                    base_filename, timestamp, (int)getpid());
    free(base_filename);
    free(filename);

    return (0);
}

static int dump_write(int fd, const void *ptr, size_t size)
{
    const char *bufp = (const char *)ptr;
    ssize_t bytes_written, bytes_left;
    struct rlimit dumpsize;
    off_t pos;

    bytes_written = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
        if (errno == ESPIPE) { /* not a seekable stream */
            bytes_left = size;
        } else {
            return pos;
        }
    } else {
        if (dumpsize.rlim_cur <= pos) {
            return -1;
        } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
            bytes_left = size;
        } else {
            size_t limit_left = dumpsize.rlim_cur - pos;
            bytes_left = limit_left >= size ? size : limit_left;
        }
    }

    /*
     * Under normal conditions a single write(2) should do, but
     * for sockets etc. this mechanism is more portable.
     */
    do {
        bytes_written = write(fd, bufp, bytes_left);
        if (bytes_written < 0) {
            if (errno == EINTR)
                continue;
            return (-1);
        } else if (bytes_written == 0) { /* eof */
            return (-1);
        }
        bufp += bytes_written;
        bytes_left -= bytes_written;
    } while (bytes_left > 0);

    return (0);
}

static int write_note(struct memelfnote *men, int fd)
{
    struct elf_note en;

    en.n_namesz = men->namesz;
    en.n_type = men->type;
    en.n_descsz = men->datasz;

    bswap_note(&en);

    if (dump_write(fd, &en, sizeof(en)) != 0)
        return (-1);
    if (dump_write(fd, men->name, men->namesz_rounded) != 0)
        return (-1);
    if (dump_write(fd, men->data, men->datasz) != 0)
        return (-1);

    return (0);
}

static void fill_thread_info(struct elf_note_info *info, const CPUState *env)
{
    TaskState *ts = (TaskState *)env->opaque;
    struct elf_thread_status *ets;

    ets = qemu_mallocz(sizeof (*ets));
    ets->num_notes = 1; /* only prstatus is dumped */
    fill_prstatus(&ets->prstatus, ts, 0);
    elf_core_copy_regs(&ets->prstatus.pr_reg, env);
    fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
              &ets->prstatus);

    QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);

    info->notes_size += note_size(&ets->notes[0]);
}

static int fill_note_info(struct elf_note_info *info,
                          long signr, const CPUState *env)
{
#define NUMNOTES 3
    CPUState *cpu = NULL;
    TaskState *ts = (TaskState *)env->opaque;
    int i;

    (void) memset(info, 0, sizeof (*info));

    QTAILQ_INIT(&info->thread_list);

    info->notes = qemu_mallocz(NUMNOTES * sizeof (struct memelfnote));
    if (info->notes == NULL)
        return (-ENOMEM);
    info->prstatus = qemu_mallocz(sizeof (*info->prstatus));
    if (info->prstatus == NULL)
        return (-ENOMEM);
    info->psinfo = qemu_mallocz(sizeof (*info->psinfo));
    if (info->psinfo == NULL)
        return (-ENOMEM);

    /*
     * First fill in status (and registers) of current thread
     * including process info & aux vector.
     */
    fill_prstatus(info->prstatus, ts, signr);
    elf_core_copy_regs(&info->prstatus->pr_reg, env);
    fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
              sizeof (*info->prstatus), info->prstatus);
    fill_psinfo(info->psinfo, ts);
    fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
              sizeof (*info->psinfo), info->psinfo);
    fill_auxv_note(&info->notes[2], ts);
    info->numnote = 3;

    info->notes_size = 0;
    for (i = 0; i < info->numnote; i++)
        info->notes_size += note_size(&info->notes[i]);

    /* read and fill status of all threads */
    cpu_list_lock();
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        if (cpu == thread_env)
            continue;
        fill_thread_info(info, cpu);
    }
    cpu_list_unlock();

    return (0);
}

static void free_note_info(struct elf_note_info *info)
{
    struct elf_thread_status *ets;

    while (!QTAILQ_EMPTY(&info->thread_list)) {
        ets = QTAILQ_FIRST(&info->thread_list);
        QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
        qemu_free(ets);
    }

    qemu_free(info->prstatus);
    qemu_free(info->psinfo);
    qemu_free(info->notes);
}

static int write_note_info(struct elf_note_info *info, int fd)
{
    struct elf_thread_status *ets;
    int i, error = 0;

    /* write prstatus, psinfo and auxv for current thread */
    for (i = 0; i < info->numnote; i++)
        if ((error = write_note(&info->notes[i], fd)) != 0)
            return (error);

    /* write prstatus for each thread */
    for (ets = info->thread_list.tqh_first; ets != NULL;
         ets = ets->ets_link.tqe_next) {
        if ((error = write_note(&ets->notes[0], fd)) != 0)
            return (error);
    }

    return (0);
}

/*
 * Write out ELF coredump.
 *
 * See documentation of ELF object file format in:
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
 *
 * The coredump format in linux is as follows:
 *
 * 0   +----------------------+         \
 *     | ELF header           | ET_CORE  |
 *     +----------------------+          |
 *     | ELF program headers  |          |--- headers
 *     | - NOTE section       |          |
 *     | - PT_LOAD sections   |          |
 *     +----------------------+         /
 *     | NOTEs:               |
 *     | - NT_PRSTATUS        |
 *     | - NT_PRPSINFO        |
 *     | - NT_AUXV            |
 *     +----------------------+ <-- aligned to target page
 *     | Process memory dump  |
 *     :                      :
 *     .                      .
 *     :                      :
 *     |                      |
 *     +----------------------+
 *
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
 * NT_PRPSINFO -> struct elf_prpsinfo
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
 *
 * The format follows the System V format as closely as possible.  Current
 * version limitations are as follows:
 *     - no floating point registers are dumped
 *
 * The function returns 0 in case of success, negative errno otherwise.
 *
 * TODO: make this work also during runtime: it should be
 * possible to force a coredump from a running process and then
 * continue processing.  For example qemu could set up a SIGUSR2
 * handler (provided that the target process hasn't registered a
 * handler for that signal) that does the dump when the signal is
 * received.
 */
static int elf_core_dump(int signr, const CPUState *env)
{
    const TaskState *ts = (const TaskState *)env->opaque;
    struct vm_area_struct *vma = NULL;
    char corefile[PATH_MAX];
    struct elf_note_info info;
    struct elfhdr elf;
    struct elf_phdr phdr;
    struct rlimit dumpsize;
    struct mm_struct *mm = NULL;
    off_t offset = 0, data_offset = 0;
    int segs = 0;
    int fd = -1;

    /* Zero-init so that free_note_info() is safe on the early-exit
       paths below, before fill_note_info() has run.  */
    (void) memset(&info, 0, sizeof (info));
    QTAILQ_INIT(&info.thread_list);

    errno = 0;
    getrlimit(RLIMIT_CORE, &dumpsize);
    if (dumpsize.rlim_cur == 0)
        return 0;

    if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
        return (-errno);

    if ((fd = open(corefile, O_WRONLY | O_CREAT,
                   S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
        return (-errno);

    /*
     * Walk through target process memory mappings and
     * set up structure containing this information.  After
     * this point vma_xxx functions can be used.
     */
    if ((mm = vma_init()) == NULL)
        goto out;

    walk_memory_regions(mm, vma_walker);
    segs = vma_get_mapping_count(mm);

    /*
     * Construct valid coredump ELF header.  We also
     * add one more segment for notes.
     */
    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
    if (dump_write(fd, &elf, sizeof (elf)) != 0)
        goto out;

    /* fill in the in-memory version of the notes */
    if (fill_note_info(&info, signr, env) < 0)
        goto out;

    offset += sizeof (elf);                             /* elf header */
    offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */

    /* write out the notes program header */
    fill_elf_note_phdr(&phdr, info.notes_size, offset);

    offset += info.notes_size;
    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
        goto out;

    /*
     * The ELF specification wants data to start at a page boundary,
     * so we align it here.
     */
    offset = roundup(offset, ELF_EXEC_PAGESIZE);

    /*
     * Write program headers for memory regions mapped in
     * the target process.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        (void) memset(&phdr, 0, sizeof (phdr));

        phdr.p_type = PT_LOAD;
        phdr.p_offset = offset;
        phdr.p_vaddr = vma->vma_start;
        phdr.p_paddr = 0;
        phdr.p_filesz = vma_dump_size(vma);
        offset += phdr.p_filesz;
        phdr.p_memsz = vma->vma_end - vma->vma_start;
        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
        if (vma->vma_flags & PROT_WRITE)
            phdr.p_flags |= PF_W;
        if (vma->vma_flags & PROT_EXEC)
            phdr.p_flags |= PF_X;
        phdr.p_align = ELF_EXEC_PAGESIZE;

        dump_write(fd, &phdr, sizeof (phdr));
    }

    /*
     * Next we write the notes just after the program headers.  No
     * alignment is needed here.
     */
    if (write_note_info(&info, fd) < 0)
        goto out;

    /* align data to page boundary */
    data_offset = lseek(fd, 0, SEEK_CUR);
    data_offset = TARGET_PAGE_ALIGN(data_offset);
    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
        goto out;

    /*
     * Finally we can dump process memory into the corefile as well.
     */
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
        abi_ulong addr;
        abi_ulong end;

        end = vma->vma_start + vma_dump_size(vma);

        for (addr = vma->vma_start; addr < end;
             addr += TARGET_PAGE_SIZE) {
            char page[TARGET_PAGE_SIZE];
            int error;

            /*
             * Read in page from target process memory and
             * write it to coredump file.
             */
            error = copy_from_user(page, addr, sizeof (page));
            if (error != 0) {
                (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
                               addr);
                errno = -error;
                goto out;
            }
            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
                goto out;
        }
    }

 out:
    free_note_info(&info);
    if (mm != NULL)
        vma_delete(mm);
    (void) close(fd);

    if (errno != 0)
        return (-errno);
    return (0);
}

#endif /* USE_ELF_CORE_DUMP */

static int load_aout_interp(void *exptr, int interp_fd)
{
    printf("a.out interpreter not yet supported\n");
    return(0);
}

void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
{
    init_thread(regs, infop);
}