linux-user/elfload.c @ 76cad711


1
/* This is the Linux kernel elf-loading code, ported into user space */
2
#include <sys/time.h>
3
#include <sys/param.h>
4

    
5
#include <stdio.h>
6
#include <sys/types.h>
7
#include <fcntl.h>
8
#include <errno.h>
9
#include <unistd.h>
10
#include <sys/mman.h>
11
#include <sys/resource.h>
12
#include <stdlib.h>
13
#include <string.h>
14
#include <time.h>
15

    
16
#include "qemu.h"
17
#include "disas/disas.h"
18

    
19
#ifdef _ARCH_PPC64
20
#undef ARCH_DLINFO
21
#undef ELF_PLATFORM
22
#undef ELF_HWCAP
23
#undef ELF_CLASS
24
#undef ELF_DATA
25
#undef ELF_ARCH
26
#endif
27

    
28
#define ELF_OSABI   ELFOSABI_SYSV
29

    
30
/* from personality.h */
31

    
32
/*
33
 * Flags for bug emulation.
34
 *
35
 * These occupy the top three bytes.
36
 */
37
enum {
38
    ADDR_NO_RANDOMIZE = 0x0040000,      /* disable randomization of VA space */
39
    FDPIC_FUNCPTRS =    0x0080000,      /* userspace function ptrs point to
40
                                           descriptors (signal handling) */
41
    MMAP_PAGE_ZERO =    0x0100000,
42
    ADDR_COMPAT_LAYOUT = 0x0200000,
43
    READ_IMPLIES_EXEC = 0x0400000,
44
    ADDR_LIMIT_32BIT =  0x0800000,
45
    SHORT_INODE =       0x1000000,
46
    WHOLE_SECONDS =     0x2000000,
47
    STICKY_TIMEOUTS =   0x4000000,
48
    ADDR_LIMIT_3GB =    0x8000000,
49
};
50

    
51
/*
52
 * Personality types.
53
 *
54
 * These go in the low byte.  Avoid using the top bit, it will
55
 * conflict with error returns.
56
 */
57
enum {
58
    PER_LINUX =         0x0000,
59
    PER_LINUX_32BIT =   0x0000 | ADDR_LIMIT_32BIT,
60
    PER_LINUX_FDPIC =   0x0000 | FDPIC_FUNCPTRS,
61
    PER_SVR4 =          0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
62
    PER_SVR3 =          0x0002 | STICKY_TIMEOUTS | SHORT_INODE,
63
    PER_SCOSVR3 =       0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE,
64
    PER_OSR5 =          0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS,
65
    PER_WYSEV386 =      0x0004 | STICKY_TIMEOUTS | SHORT_INODE,
66
    PER_ISCR4 =         0x0005 | STICKY_TIMEOUTS,
67
    PER_BSD =           0x0006,
68
    PER_SUNOS =         0x0006 | STICKY_TIMEOUTS,
69
    PER_XENIX =         0x0007 | STICKY_TIMEOUTS | SHORT_INODE,
70
    PER_LINUX32 =       0x0008,
71
    PER_LINUX32_3GB =   0x0008 | ADDR_LIMIT_3GB,
72
    PER_IRIX32 =        0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */
73
    PER_IRIXN32 =       0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */
74
    PER_IRIX64 =        0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */
75
    PER_RISCOS =        0x000c,
76
    PER_SOLARIS =       0x000d | STICKY_TIMEOUTS,
77
    PER_UW7 =           0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO,
78
    PER_OSF4 =          0x000f,                  /* OSF/1 v4 */
79
    PER_HPUX =          0x0010,
80
    PER_MASK =          0x00ff,
81
};
82

    
83
/*
84
 * Return the base personality without flags.
85
 */
86
#define personality(pers)       (pers & PER_MASK)
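/* e.g. personality(PER_LINUX32_3GB) == PER_LINUX32. */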
87

    
88
/* This flag is ineffective under Linux too; it should be deleted. */
89
#ifndef MAP_DENYWRITE
90
#define MAP_DENYWRITE 0
91
#endif
92

    
93
/* should probably go in elf.h */
94
#ifndef ELIBBAD
95
#define ELIBBAD 80
96
#endif
97

    
98
#ifdef TARGET_WORDS_BIGENDIAN
99
#define ELF_DATA        ELFDATA2MSB
100
#else
101
#define ELF_DATA        ELFDATA2LSB
102
#endif
103

    
104
typedef target_ulong    target_elf_greg_t;
105
#ifdef USE_UID16
106
typedef target_ushort   target_uid_t;
107
typedef target_ushort   target_gid_t;
108
#else
109
typedef target_uint     target_uid_t;
110
typedef target_uint     target_gid_t;
111
#endif
112
typedef target_int      target_pid_t;
113

    
114
#ifdef TARGET_I386
115

    
116
#define ELF_PLATFORM get_elf_platform()
117

    
118
static const char *get_elf_platform(void)
119
{
120
    static char elf_platform[] = "i386";
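    /* CPUID family 3..6 selects "i386".."i686"; anything newer is reported as "i686". */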
121
    int family = (thread_env->cpuid_version >> 8) & 0xff;
122
    if (family > 6)
123
        family = 6;
124
    if (family >= 3)
125
        elf_platform[1] = '0' + family;
126
    return elf_platform;
127
}
128

    
129
#define ELF_HWCAP get_elf_hwcap()
130

    
131
static uint32_t get_elf_hwcap(void)
132
{
133
    return thread_env->cpuid_features;
134
}
135

    
136
#ifdef TARGET_X86_64
137
#define ELF_START_MMAP 0x2aaaaab000ULL
138
#define elf_check_arch(x) ( ((x) == ELF_ARCH) )
139

    
140
#define ELF_CLASS      ELFCLASS64
141
#define ELF_ARCH       EM_X86_64
142

    
143
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
144
{
145
    regs->rax = 0;
146
    regs->rsp = infop->start_stack;
147
    regs->rip = infop->entry;
148
}
149

    
150
#define ELF_NREG    27
151
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
152

    
153
/*
154
 * Note that ELF_NREG should be 29 as there should be place for
155
 * TRAPNO and ERR "registers" as well but linux doesn't dump
156
 * those.
157
 *
158
 * See linux kernel: arch/x86/include/asm/elf.h
159
 */
160
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
161
{
162
    (*regs)[0] = env->regs[15];
163
    (*regs)[1] = env->regs[14];
164
    (*regs)[2] = env->regs[13];
165
    (*regs)[3] = env->regs[12];
166
    (*regs)[4] = env->regs[R_EBP];
167
    (*regs)[5] = env->regs[R_EBX];
168
    (*regs)[6] = env->regs[11];
169
    (*regs)[7] = env->regs[10];
170
    (*regs)[8] = env->regs[9];
171
    (*regs)[9] = env->regs[8];
172
    (*regs)[10] = env->regs[R_EAX];
173
    (*regs)[11] = env->regs[R_ECX];
174
    (*regs)[12] = env->regs[R_EDX];
175
    (*regs)[13] = env->regs[R_ESI];
176
    (*regs)[14] = env->regs[R_EDI];
177
    (*regs)[15] = env->regs[R_EAX]; /* XXX */
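    /* Slot 15 is orig_rax in the kernel's struct user_regs_struct; QEMU keeps no
       separate copy of it, hence the RAX approximation above. */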
178
    (*regs)[16] = env->eip;
179
    (*regs)[17] = env->segs[R_CS].selector & 0xffff;
180
    (*regs)[18] = env->eflags;
181
    (*regs)[19] = env->regs[R_ESP];
182
    (*regs)[20] = env->segs[R_SS].selector & 0xffff;
183
    (*regs)[21] = env->segs[R_FS].selector & 0xffff;
184
    (*regs)[22] = env->segs[R_GS].selector & 0xffff;
185
    (*regs)[23] = env->segs[R_DS].selector & 0xffff;
186
    (*regs)[24] = env->segs[R_ES].selector & 0xffff;
187
    (*regs)[25] = env->segs[R_FS].selector & 0xffff;
188
    (*regs)[26] = env->segs[R_GS].selector & 0xffff;
189
}
190

    
191
#else
192

    
193
#define ELF_START_MMAP 0x80000000
194

    
195
/*
196
 * This is used to ensure we don't load something for the wrong architecture.
197
 */
198
#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) )
199

    
200
/*
201
 * These are used to set parameters in the core dumps.
202
 */
203
#define ELF_CLASS       ELFCLASS32
204
#define ELF_ARCH        EM_386
205

    
206
static inline void init_thread(struct target_pt_regs *regs,
207
                               struct image_info *infop)
208
{
209
    regs->esp = infop->start_stack;
210
    regs->eip = infop->entry;
211

    
212
    /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program
213
       starts %edx contains a pointer to a function which might be
214
       registered using `atexit'.  This provides a means for the
215
       dynamic linker to call DT_FINI functions for shared libraries
216
       that have been loaded before the code runs.
217

218
       A value of 0 means we have no such handler.  */
219
    regs->edx = 0;
220
}
221

    
222
#define ELF_NREG    17
223
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
224

    
225
/*
226
 * Note that ELF_NREG should be 19 as there should be place for
227
 * TRAPNO and ERR "registers" as well but linux doesn't dump
228
 * those.
229
 *
230
 * See linux kernel: arch/x86/include/asm/elf.h
231
 */
232
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env)
233
{
234
    (*regs)[0] = env->regs[R_EBX];
235
    (*regs)[1] = env->regs[R_ECX];
236
    (*regs)[2] = env->regs[R_EDX];
237
    (*regs)[3] = env->regs[R_ESI];
238
    (*regs)[4] = env->regs[R_EDI];
239
    (*regs)[5] = env->regs[R_EBP];
240
    (*regs)[6] = env->regs[R_EAX];
241
    (*regs)[7] = env->segs[R_DS].selector & 0xffff;
242
    (*regs)[8] = env->segs[R_ES].selector & 0xffff;
243
    (*regs)[9] = env->segs[R_FS].selector & 0xffff;
244
    (*regs)[10] = env->segs[R_GS].selector & 0xffff;
245
    (*regs)[11] = env->regs[R_EAX]; /* XXX */
246
    (*regs)[12] = env->eip;
247
    (*regs)[13] = env->segs[R_CS].selector & 0xffff;
248
    (*regs)[14] = env->eflags;
249
    (*regs)[15] = env->regs[R_ESP];
250
    (*regs)[16] = env->segs[R_SS].selector & 0xffff;
251
}
252
#endif
253

    
254
#define USE_ELF_CORE_DUMP
255
#define ELF_EXEC_PAGESIZE       4096
256

    
257
#endif
258

    
259
#ifdef TARGET_ARM
260

    
261
#define ELF_START_MMAP 0x80000000
262

    
263
#define elf_check_arch(x) ( (x) == EM_ARM )
264

    
265
#define ELF_CLASS       ELFCLASS32
266
#define ELF_ARCH        EM_ARM
267

    
268
static inline void init_thread(struct target_pt_regs *regs,
269
                               struct image_info *infop)
270
{
271
    abi_long stack = infop->start_stack;
272
    memset(regs, 0, sizeof(*regs));
273
    regs->ARM_cpsr = 0x10;
274
    if (infop->entry & 1)
275
        regs->ARM_cpsr |= CPSR_T;
276
    regs->ARM_pc = infop->entry & 0xfffffffe;
277
    regs->ARM_sp = infop->start_stack;
278
    /* FIXME - what to do on failure of get_user()? */
279
    get_user_ual(regs->ARM_r2, stack + 8); /* envp */
280
    get_user_ual(regs->ARM_r1, stack + 4); /* envp */
281
    /* XXX: it seems that r0 is zeroed after ! */
282
    regs->ARM_r0 = 0;
283
    /* For uClinux PIC binaries.  */
284
    /* XXX: Linux does this only on ARM with no MMU (do we care ?) */
285
    regs->ARM_r10 = infop->start_data;
286
}
287

    
288
#define ELF_NREG    18
289
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
290

    
291
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env)
292
{
293
    (*regs)[0] = tswapl(env->regs[0]);
294
    (*regs)[1] = tswapl(env->regs[1]);
295
    (*regs)[2] = tswapl(env->regs[2]);
296
    (*regs)[3] = tswapl(env->regs[3]);
297
    (*regs)[4] = tswapl(env->regs[4]);
298
    (*regs)[5] = tswapl(env->regs[5]);
299
    (*regs)[6] = tswapl(env->regs[6]);
300
    (*regs)[7] = tswapl(env->regs[7]);
301
    (*regs)[8] = tswapl(env->regs[8]);
302
    (*regs)[9] = tswapl(env->regs[9]);
303
    (*regs)[10] = tswapl(env->regs[10]);
304
    (*regs)[11] = tswapl(env->regs[11]);
305
    (*regs)[12] = tswapl(env->regs[12]);
306
    (*regs)[13] = tswapl(env->regs[13]);
307
    (*regs)[14] = tswapl(env->regs[14]);
308
    (*regs)[15] = tswapl(env->regs[15]);
309

    
310
    (*regs)[16] = tswapl(cpsr_read((CPUARMState *)env));
311
    (*regs)[17] = tswapl(env->regs[0]); /* XXX */
312
}
313

    
314
#define USE_ELF_CORE_DUMP
315
#define ELF_EXEC_PAGESIZE       4096
316

    
317
enum
318
{
319
    ARM_HWCAP_ARM_SWP       = 1 << 0,
320
    ARM_HWCAP_ARM_HALF      = 1 << 1,
321
    ARM_HWCAP_ARM_THUMB     = 1 << 2,
322
    ARM_HWCAP_ARM_26BIT     = 1 << 3,
323
    ARM_HWCAP_ARM_FAST_MULT = 1 << 4,
324
    ARM_HWCAP_ARM_FPA       = 1 << 5,
325
    ARM_HWCAP_ARM_VFP       = 1 << 6,
326
    ARM_HWCAP_ARM_EDSP      = 1 << 7,
327
    ARM_HWCAP_ARM_JAVA      = 1 << 8,
328
    ARM_HWCAP_ARM_IWMMXT    = 1 << 9,
329
    ARM_HWCAP_ARM_THUMBEE   = 1 << 10,
330
    ARM_HWCAP_ARM_NEON      = 1 << 11,
331
    ARM_HWCAP_ARM_VFPv3     = 1 << 12,
332
    ARM_HWCAP_ARM_VFPv3D16  = 1 << 13,
333
};
334

    
335
#define TARGET_HAS_VALIDATE_GUEST_SPACE
336
/* Return 1 if the proposed guest space is suitable for the guest.
337
 * Return 0 if the proposed guest space isn't suitable, but another
338
 * address space should be tried.
339
 * Return -1 if there is no way the proposed guest space can be
340
 * valid regardless of the base.
341
 * The guest code may leave a page mapped and populate it if the
342
 * address is suitable.
343
 */
344
static int validate_guest_space(unsigned long guest_base,
345
                                unsigned long guest_size)
346
{
347
    unsigned long real_start, test_page_addr;
348

    
349
    /* We need to check that we can force a fault on access to the
350
     * commpage at 0xffff0fxx
351
     */
352
    test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);
353

    
354
    /* If the commpage lies within the already allocated guest space,
355
     * then there is no way we can allocate it.
356
     */
357
    if (test_page_addr >= guest_base
358
        && test_page_addr <= (guest_base + guest_size)) {
359
        return -1;
360
    }
361

    
362
    /* Note it needs to be writeable to let us initialise it */
363
    real_start = (unsigned long)
364
                 mmap((void *)test_page_addr, qemu_host_page_size,
365
                     PROT_READ | PROT_WRITE,
366
                     MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
367

    
368
    /* If we can't map it then try another address */
369
    if (real_start == -1ul) {
370
        return 0;
371
    }
372

    
373
    if (real_start != test_page_addr) {
374
        /* OS didn't put the page where we asked - unmap and reject */
375
        munmap((void *)real_start, qemu_host_page_size);
376
        return 0;
377
    }
378

    
379
    /* Leave the page mapped
380
     * Populate it (mmap should have left it all 0'd)
381
     */
382

    
383
    /* Kernel helper versions */
384
    __put_user(5, (uint32_t *)g2h(0xffff0ffcul));
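    /* 0xffff0ffc is __kuser_helper_version in the ARM commpage. */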
385

    
386
    /* Now that it's populated, make it RO */
387
    if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
388
        perror("Protecting guest commpage");
389
        exit(-1);
390
    }
391

    
392
    return 1; /* All good */
393
}
394

    
395

    
396
#define ELF_HWCAP get_elf_hwcap()
397

    
398
static uint32_t get_elf_hwcap(void)
399
{
400
    CPUARMState *e = thread_env;
401
    uint32_t hwcaps = 0;
402

    
403
    hwcaps |= ARM_HWCAP_ARM_SWP;
404
    hwcaps |= ARM_HWCAP_ARM_HALF;
405
    hwcaps |= ARM_HWCAP_ARM_THUMB;
406
    hwcaps |= ARM_HWCAP_ARM_FAST_MULT;
407
    hwcaps |= ARM_HWCAP_ARM_FPA;
408

    
409
    /* probe for the extra features */
410
#define GET_FEATURE(feat, hwcap) \
411
    do {if (arm_feature(e, feat)) { hwcaps |= hwcap; } } while (0)
412
    GET_FEATURE(ARM_FEATURE_VFP, ARM_HWCAP_ARM_VFP);
413
    GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT);
414
    GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE);
415
    GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON);
416
    GET_FEATURE(ARM_FEATURE_VFP3, ARM_HWCAP_ARM_VFPv3);
417
    GET_FEATURE(ARM_FEATURE_VFP_FP16, ARM_HWCAP_ARM_VFPv3D16);
418
#undef GET_FEATURE
419

    
420
    return hwcaps;
421
}
422

    
423
#endif
424

    
425
#ifdef TARGET_UNICORE32
426

    
427
#define ELF_START_MMAP          0x80000000
428

    
429
#define elf_check_arch(x)       ((x) == EM_UNICORE32)
430

    
431
#define ELF_CLASS               ELFCLASS32
432
#define ELF_DATA                ELFDATA2LSB
433
#define ELF_ARCH                EM_UNICORE32
434

    
435
static inline void init_thread(struct target_pt_regs *regs,
436
        struct image_info *infop)
437
{
438
    abi_long stack = infop->start_stack;
439
    memset(regs, 0, sizeof(*regs));
440
    regs->UC32_REG_asr = 0x10;
441
    regs->UC32_REG_pc = infop->entry & 0xfffffffe;
442
    regs->UC32_REG_sp = infop->start_stack;
443
    /* FIXME - what to do on failure of get_user()? */
444
    get_user_ual(regs->UC32_REG_02, stack + 8); /* envp */
445
    get_user_ual(regs->UC32_REG_01, stack + 4); /* envp */
446
    /* XXX: it seems that r0 is zeroed after ! */
447
    regs->UC32_REG_00 = 0;
448
}
449

    
450
#define ELF_NREG    34
451
typedef target_elf_greg_t  target_elf_gregset_t[ELF_NREG];
452

    
453
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUUniCore32State *env)
454
{
455
    (*regs)[0] = env->regs[0];
456
    (*regs)[1] = env->regs[1];
457
    (*regs)[2] = env->regs[2];
458
    (*regs)[3] = env->regs[3];
459
    (*regs)[4] = env->regs[4];
460
    (*regs)[5] = env->regs[5];
461
    (*regs)[6] = env->regs[6];
462
    (*regs)[7] = env->regs[7];
463
    (*regs)[8] = env->regs[8];
464
    (*regs)[9] = env->regs[9];
465
    (*regs)[10] = env->regs[10];
466
    (*regs)[11] = env->regs[11];
467
    (*regs)[12] = env->regs[12];
468
    (*regs)[13] = env->regs[13];
469
    (*regs)[14] = env->regs[14];
470
    (*regs)[15] = env->regs[15];
471
    (*regs)[16] = env->regs[16];
472
    (*regs)[17] = env->regs[17];
473
    (*regs)[18] = env->regs[18];
474
    (*regs)[19] = env->regs[19];
475
    (*regs)[20] = env->regs[20];
476
    (*regs)[21] = env->regs[21];
477
    (*regs)[22] = env->regs[22];
478
    (*regs)[23] = env->regs[23];
479
    (*regs)[24] = env->regs[24];
480
    (*regs)[25] = env->regs[25];
481
    (*regs)[26] = env->regs[26];
482
    (*regs)[27] = env->regs[27];
483
    (*regs)[28] = env->regs[28];
484
    (*regs)[29] = env->regs[29];
485
    (*regs)[30] = env->regs[30];
486
    (*regs)[31] = env->regs[31];
487

    
488
    (*regs)[32] = cpu_asr_read((CPUUniCore32State *)env);
489
    (*regs)[33] = env->regs[0]; /* XXX */
490
}
491

    
492
#define USE_ELF_CORE_DUMP
493
#define ELF_EXEC_PAGESIZE               4096
494

    
495
#define ELF_HWCAP                       (UC32_HWCAP_CMOV | UC32_HWCAP_UCF64)
496

    
497
#endif
498

    
499
#ifdef TARGET_SPARC
500
#ifdef TARGET_SPARC64
501

    
502
#define ELF_START_MMAP 0x80000000
503
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
504
                    | HWCAP_SPARC_MULDIV | HWCAP_SPARC_V9)
505
#ifndef TARGET_ABI32
506
#define elf_check_arch(x) ( (x) == EM_SPARCV9 || (x) == EM_SPARC32PLUS )
507
#else
508
#define elf_check_arch(x) ( (x) == EM_SPARC32PLUS || (x) == EM_SPARC )
509
#endif
510

    
511
#define ELF_CLASS   ELFCLASS64
512
#define ELF_ARCH    EM_SPARCV9
513

    
514
#define STACK_BIAS              2047
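/* The SPARC V9 ABI biases %sp by 2047 bytes; %sp also reserves 16 doublewords
   for the register window save area, hence the "16 * 8" below. */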
515

    
516
static inline void init_thread(struct target_pt_regs *regs,
517
                               struct image_info *infop)
518
{
519
#ifndef TARGET_ABI32
520
    regs->tstate = 0;
521
#endif
522
    regs->pc = infop->entry;
523
    regs->npc = regs->pc + 4;
524
    regs->y = 0;
525
#ifdef TARGET_ABI32
526
    regs->u_regs[14] = infop->start_stack - 16 * 4;
527
#else
528
    if (personality(infop->personality) == PER_LINUX32)
529
        regs->u_regs[14] = infop->start_stack - 16 * 4;
530
    else
531
        regs->u_regs[14] = infop->start_stack - 16 * 8 - STACK_BIAS;
532
#endif
533
}
534

    
535
#else
536
#define ELF_START_MMAP 0x80000000
537
#define ELF_HWCAP  (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | HWCAP_SPARC_SWAP \
538
                    | HWCAP_SPARC_MULDIV)
539
#define elf_check_arch(x) ( (x) == EM_SPARC )
540

    
541
#define ELF_CLASS   ELFCLASS32
542
#define ELF_ARCH    EM_SPARC
543

    
544
static inline void init_thread(struct target_pt_regs *regs,
545
                               struct image_info *infop)
546
{
547
    regs->psr = 0;
548
    regs->pc = infop->entry;
549
    regs->npc = regs->pc + 4;
550
    regs->y = 0;
551
    regs->u_regs[14] = infop->start_stack - 16 * 4;
552
}
553

    
554
#endif
555
#endif
556

    
557
#ifdef TARGET_PPC
558

    
559
#define ELF_START_MMAP 0x80000000
560

    
561
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
562

    
563
#define elf_check_arch(x) ( (x) == EM_PPC64 )
564

    
565
#define ELF_CLASS       ELFCLASS64
566

    
567
#else
568

    
569
#define elf_check_arch(x) ( (x) == EM_PPC )
570

    
571
#define ELF_CLASS       ELFCLASS32
572

    
573
#endif
574

    
575
#define ELF_ARCH        EM_PPC
576

    
577
/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP).
578
   See arch/powerpc/include/asm/cputable.h.  */
579
enum {
580
    QEMU_PPC_FEATURE_32 = 0x80000000,
581
    QEMU_PPC_FEATURE_64 = 0x40000000,
582
    QEMU_PPC_FEATURE_601_INSTR = 0x20000000,
583
    QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000,
584
    QEMU_PPC_FEATURE_HAS_FPU = 0x08000000,
585
    QEMU_PPC_FEATURE_HAS_MMU = 0x04000000,
586
    QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000,
587
    QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000,
588
    QEMU_PPC_FEATURE_HAS_SPE = 0x00800000,
589
    QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000,
590
    QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000,
591
    QEMU_PPC_FEATURE_NO_TB = 0x00100000,
592
    QEMU_PPC_FEATURE_POWER4 = 0x00080000,
593
    QEMU_PPC_FEATURE_POWER5 = 0x00040000,
594
    QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000,
595
    QEMU_PPC_FEATURE_CELL = 0x00010000,
596
    QEMU_PPC_FEATURE_BOOKE = 0x00008000,
597
    QEMU_PPC_FEATURE_SMT = 0x00004000,
598
    QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000,
599
    QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000,
600
    QEMU_PPC_FEATURE_PA6T = 0x00000800,
601
    QEMU_PPC_FEATURE_HAS_DFP = 0x00000400,
602
    QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200,
603
    QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100,
604
    QEMU_PPC_FEATURE_HAS_VSX = 0x00000080,
605
    QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040,
606

    
607
    QEMU_PPC_FEATURE_TRUE_LE = 0x00000002,
608
    QEMU_PPC_FEATURE_PPC_LE = 0x00000001,
609
};
610

    
611
#define ELF_HWCAP get_elf_hwcap()
612

    
613
static uint32_t get_elf_hwcap(void)
614
{
615
    CPUPPCState *e = thread_env;
616
    uint32_t features = 0;
617

    
618
    /* We don't have to be terribly complete here; the high points are
619
       Altivec/FP/SPE support.  Anything else is just a bonus.  */
620
#define GET_FEATURE(flag, feature)                                      \
621
    do {if (e->insns_flags & flag) features |= feature; } while(0)
622
    GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64);
623
    GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU);
624
    GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC);
625
    GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE);
626
    GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE);
627
    GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE);
628
    GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE);
629
    GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC);
630
#undef GET_FEATURE
631

    
632
    return features;
633
}
634

    
635
/*
636
 * The requirements here are:
637
 * - keep the final alignment of sp (sp & 0xf)
638
 * - make sure the 32-bit value at the first 16 byte aligned position of
639
 *   AUXV is greater than 16 for glibc compatibility.
640
 *   AT_IGNOREPPC is used for that.
641
 * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC,
642
 *   even if DLINFO_ARCH_ITEMS goes to zero or is undefined.
643
 */
644
#define DLINFO_ARCH_ITEMS       5
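/* DLINFO_ARCH_ITEMS counts the NEW_AUX_ENT entries emitted by ARCH_DLINFO below;
   create_elf_tables() uses it when sizing the auxv. */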
645
#define ARCH_DLINFO                                     \
646
    do {                                                \
647
        NEW_AUX_ENT(AT_DCACHEBSIZE, 0x20);              \
648
        NEW_AUX_ENT(AT_ICACHEBSIZE, 0x20);              \
649
        NEW_AUX_ENT(AT_UCACHEBSIZE, 0);                 \
650
        /*                                              \
651
         * Now handle glibc compatibility.              \
652
         */                                             \
653
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
654
        NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC);        \
655
    } while (0)
656

    
657
static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop)
658
{
659
    _regs->gpr[1] = infop->start_stack;
660
#if defined(TARGET_PPC64) && !defined(TARGET_ABI32)
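    /* On 64-bit PPC the entry point is a function descriptor: the first
       doubleword is the code address, the second the TOC pointer to load
       into r2. */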
661
    _regs->gpr[2] = ldq_raw(infop->entry + 8) + infop->load_bias;
662
    infop->entry = ldq_raw(infop->entry) + infop->load_bias;
663
#endif
664
    _regs->nip = infop->entry;
665
}
666

    
667
/* See linux kernel: arch/powerpc/include/asm/elf.h.  */
668
#define ELF_NREG 48
669
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
670

    
671
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
672
{
673
    int i;
674
    target_ulong ccr = 0;
675

    
676
    for (i = 0; i < ARRAY_SIZE(env->gpr); i++) {
677
        (*regs)[i] = tswapl(env->gpr[i]);
678
    }
679

    
680
    (*regs)[32] = tswapl(env->nip);
681
    (*regs)[33] = tswapl(env->msr);
682
    (*regs)[35] = tswapl(env->ctr);
683
    (*regs)[36] = tswapl(env->lr);
684
    (*regs)[37] = tswapl(env->xer);
685

    
686
    for (i = 0; i < ARRAY_SIZE(env->crf); i++) {
687
        ccr |= env->crf[i] << (32 - ((i + 1) * 4));
688
    }
689
    (*regs)[38] = tswapl(ccr);
690
}
691

    
692
#define USE_ELF_CORE_DUMP
693
#define ELF_EXEC_PAGESIZE       4096
694

    
695
#endif
696

    
697
#ifdef TARGET_MIPS
698

    
699
#define ELF_START_MMAP 0x80000000
700

    
701
#define elf_check_arch(x) ( (x) == EM_MIPS )
702

    
703
#ifdef TARGET_MIPS64
704
#define ELF_CLASS   ELFCLASS64
705
#else
706
#define ELF_CLASS   ELFCLASS32
707
#endif
708
#define ELF_ARCH    EM_MIPS
709

    
710
static inline void init_thread(struct target_pt_regs *regs,
711
                               struct image_info *infop)
712
{
713
    regs->cp0_status = 2 << CP0St_KSU;
714
    regs->cp0_epc = infop->entry;
715
    regs->regs[29] = infop->start_stack;
716
}
717

    
718
/* See linux kernel: arch/mips/include/asm/elf.h.  */
719
#define ELF_NREG 45
720
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
721

    
722
/* See linux kernel: arch/mips/include/asm/reg.h.  */
723
enum {
724
#ifdef TARGET_MIPS64
725
    TARGET_EF_R0 = 0,
726
#else
727
    TARGET_EF_R0 = 6,
728
#endif
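    /* The o32 elf_gregset_t has six leading pad words before the GPRs;
       the n64 layout has none. */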
729
    TARGET_EF_R26 = TARGET_EF_R0 + 26,
730
    TARGET_EF_R27 = TARGET_EF_R0 + 27,
731
    TARGET_EF_LO = TARGET_EF_R0 + 32,
732
    TARGET_EF_HI = TARGET_EF_R0 + 33,
733
    TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34,
734
    TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35,
735
    TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36,
736
    TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37
737
};
738

    
739
/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs.  */
740
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env)
741
{
742
    int i;
743

    
744
    for (i = 0; i < TARGET_EF_R0; i++) {
745
        (*regs)[i] = 0;
746
    }
747
    (*regs)[TARGET_EF_R0] = 0;
748

    
749
    for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) {
750
        (*regs)[TARGET_EF_R0 + i] = tswapl(env->active_tc.gpr[i]);
751
    }
752

    
753
    (*regs)[TARGET_EF_R26] = 0;
754
    (*regs)[TARGET_EF_R27] = 0;
755
    (*regs)[TARGET_EF_LO] = tswapl(env->active_tc.LO[0]);
756
    (*regs)[TARGET_EF_HI] = tswapl(env->active_tc.HI[0]);
757
    (*regs)[TARGET_EF_CP0_EPC] = tswapl(env->active_tc.PC);
758
    (*regs)[TARGET_EF_CP0_BADVADDR] = tswapl(env->CP0_BadVAddr);
759
    (*regs)[TARGET_EF_CP0_STATUS] = tswapl(env->CP0_Status);
760
    (*regs)[TARGET_EF_CP0_CAUSE] = tswapl(env->CP0_Cause);
761
}
762

    
763
#define USE_ELF_CORE_DUMP
764
#define ELF_EXEC_PAGESIZE        4096
765

    
766
#endif /* TARGET_MIPS */
767

    
768
#ifdef TARGET_MICROBLAZE
769

    
770
#define ELF_START_MMAP 0x80000000
771

    
772
#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD)
773

    
774
#define ELF_CLASS   ELFCLASS32
775
#define ELF_ARCH    EM_MICROBLAZE
776

    
777
static inline void init_thread(struct target_pt_regs *regs,
778
                               struct image_info *infop)
779
{
780
    regs->pc = infop->entry;
781
    regs->r1 = infop->start_stack;
782

    
783
}
784

    
785
#define ELF_EXEC_PAGESIZE        4096
786

    
787
#define USE_ELF_CORE_DUMP
788
#define ELF_NREG 38
789
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
790

    
791
/* See linux kernel: arch/microblaze/include/asm/elf.h.  */
792
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env)
793
{
794
    int i, pos = 0;
795

    
796
    for (i = 0; i < 32; i++) {
797
        (*regs)[pos++] = tswapl(env->regs[i]);
798
    }
799

    
800
    for (i = 0; i < 6; i++) {
801
        (*regs)[pos++] = tswapl(env->sregs[i]);
802
    }
803
}
804

    
805
#endif /* TARGET_MICROBLAZE */
806

    
807
#ifdef TARGET_OPENRISC
808

    
809
#define ELF_START_MMAP 0x08000000
810

    
811
#define elf_check_arch(x) ((x) == EM_OPENRISC)
812

    
813
#define ELF_ARCH EM_OPENRISC
814
#define ELF_CLASS ELFCLASS32
815
#define ELF_DATA  ELFDATA2MSB
816

    
817
static inline void init_thread(struct target_pt_regs *regs,
818
                               struct image_info *infop)
819
{
820
    regs->pc = infop->entry;
821
    regs->gpr[1] = infop->start_stack;
822
}
823

    
824
#define USE_ELF_CORE_DUMP
825
#define ELF_EXEC_PAGESIZE 8192
826

    
827
/* See linux kernel arch/openrisc/include/asm/elf.h.  */
828
#define ELF_NREG 34 /* gprs and pc, sr */
829
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
830

    
831
static void elf_core_copy_regs(target_elf_gregset_t *regs,
832
                               const CPUOpenRISCState *env)
833
{
834
    int i;
835

    
836
    for (i = 0; i < 32; i++) {
837
        (*regs)[i] = tswapl(env->gpr[i]);
838
    }
839

    
840
    (*regs)[32] = tswapl(env->pc);
841
    (*regs)[33] = tswapl(env->sr);
842
}
843
#define ELF_HWCAP 0
844
#define ELF_PLATFORM NULL
845

    
846
#endif /* TARGET_OPENRISC */
847

    
848
#ifdef TARGET_SH4
849

    
850
#define ELF_START_MMAP 0x80000000
851

    
852
#define elf_check_arch(x) ( (x) == EM_SH )
853

    
854
#define ELF_CLASS ELFCLASS32
855
#define ELF_ARCH  EM_SH
856

    
857
static inline void init_thread(struct target_pt_regs *regs,
858
                               struct image_info *infop)
859
{
860
    /* Check other registers XXXXX */
861
    regs->pc = infop->entry;
862
    regs->regs[15] = infop->start_stack;
863
}
864

    
865
/* See linux kernel: arch/sh/include/asm/elf.h.  */
866
#define ELF_NREG 23
867
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
868

    
869
/* See linux kernel: arch/sh/include/asm/ptrace.h.  */
870
enum {
871
    TARGET_REG_PC = 16,
872
    TARGET_REG_PR = 17,
873
    TARGET_REG_SR = 18,
874
    TARGET_REG_GBR = 19,
875
    TARGET_REG_MACH = 20,
876
    TARGET_REG_MACL = 21,
877
    TARGET_REG_SYSCALL = 22
878
};
879

    
880
static inline void elf_core_copy_regs(target_elf_gregset_t *regs,
881
                                      const CPUSH4State *env)
882
{
883
    int i;
884

    
885
    for (i = 0; i < 16; i++) {
886
        (*regs)[i] = tswapl(env->gregs[i]);
887
    }
888

    
889
    (*regs)[TARGET_REG_PC] = tswapl(env->pc);
890
    (*regs)[TARGET_REG_PR] = tswapl(env->pr);
891
    (*regs)[TARGET_REG_SR] = tswapl(env->sr);
892
    (*regs)[TARGET_REG_GBR] = tswapl(env->gbr);
893
    (*regs)[TARGET_REG_MACH] = tswapl(env->mach);
894
    (*regs)[TARGET_REG_MACL] = tswapl(env->macl);
895
    (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */
896
}
897

    
898
#define USE_ELF_CORE_DUMP
899
#define ELF_EXEC_PAGESIZE        4096
900

    
901
#endif
902

    
903
#ifdef TARGET_CRIS
904

    
905
#define ELF_START_MMAP 0x80000000
906

    
907
#define elf_check_arch(x) ( (x) == EM_CRIS )
908

    
909
#define ELF_CLASS ELFCLASS32
910
#define ELF_ARCH  EM_CRIS
911

    
912
static inline void init_thread(struct target_pt_regs *regs,
913
                               struct image_info *infop)
914
{
915
    regs->erp = infop->entry;
916
}
917

    
918
#define ELF_EXEC_PAGESIZE        8192
919

    
920
#endif
921

    
922
#ifdef TARGET_M68K
923

    
924
#define ELF_START_MMAP 0x80000000
925

    
926
#define elf_check_arch(x) ( (x) == EM_68K )
927

    
928
#define ELF_CLASS       ELFCLASS32
929
#define ELF_ARCH        EM_68K
930

    
931
/* ??? Does this need to do anything?
932
   #define ELF_PLAT_INIT(_r) */
933

    
934
static inline void init_thread(struct target_pt_regs *regs,
935
                               struct image_info *infop)
936
{
937
    regs->usp = infop->start_stack;
938
    regs->sr = 0;
939
    regs->pc = infop->entry;
940
}
941

    
942
/* See linux kernel: arch/m68k/include/asm/elf.h.  */
943
#define ELF_NREG 20
944
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
945

    
946
static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env)
947
{
948
    (*regs)[0] = tswapl(env->dregs[1]);
949
    (*regs)[1] = tswapl(env->dregs[2]);
950
    (*regs)[2] = tswapl(env->dregs[3]);
951
    (*regs)[3] = tswapl(env->dregs[4]);
952
    (*regs)[4] = tswapl(env->dregs[5]);
953
    (*regs)[5] = tswapl(env->dregs[6]);
954
    (*regs)[6] = tswapl(env->dregs[7]);
955
    (*regs)[7] = tswapl(env->aregs[0]);
956
    (*regs)[8] = tswapl(env->aregs[1]);
957
    (*regs)[9] = tswapl(env->aregs[2]);
958
    (*regs)[10] = tswapl(env->aregs[3]);
959
    (*regs)[11] = tswapl(env->aregs[4]);
960
    (*regs)[12] = tswapl(env->aregs[5]);
961
    (*regs)[13] = tswapl(env->aregs[6]);
962
    (*regs)[14] = tswapl(env->dregs[0]);
963
    (*regs)[15] = tswapl(env->aregs[7]);
964
    (*regs)[16] = tswapl(env->dregs[0]); /* FIXME: orig_d0 */
965
    (*regs)[17] = tswapl(env->sr);
966
    (*regs)[18] = tswapl(env->pc);
967
    (*regs)[19] = 0;  /* FIXME: regs->format | regs->vector */
968
}
969

    
970
#define USE_ELF_CORE_DUMP
971
#define ELF_EXEC_PAGESIZE       8192
972

    
973
#endif
974

    
975
#ifdef TARGET_ALPHA
976

    
977
#define ELF_START_MMAP (0x30000000000ULL)
978

    
979
#define elf_check_arch(x) ( (x) == ELF_ARCH )
980

    
981
#define ELF_CLASS      ELFCLASS64
982
#define ELF_ARCH       EM_ALPHA
983

    
984
static inline void init_thread(struct target_pt_regs *regs,
985
                               struct image_info *infop)
986
{
987
    regs->pc = infop->entry;
988
    regs->ps = 8;
989
    regs->usp = infop->start_stack;
990
}
991

    
992
#define ELF_EXEC_PAGESIZE        8192
993

    
994
#endif /* TARGET_ALPHA */
995

    
996
#ifdef TARGET_S390X
997

    
998
#define ELF_START_MMAP (0x20000000000ULL)
999

    
1000
#define elf_check_arch(x) ( (x) == ELF_ARCH )
1001

    
1002
#define ELF_CLASS        ELFCLASS64
1003
#define ELF_DATA        ELFDATA2MSB
1004
#define ELF_ARCH        EM_S390
1005

    
1006
static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop)
1007
{
1008
    regs->psw.addr = infop->entry;
1009
    regs->psw.mask = PSW_MASK_64 | PSW_MASK_32;
1010
    regs->gprs[15] = infop->start_stack;
1011
}
1012

    
1013
#endif /* TARGET_S390X */
1014

    
1015
#ifndef ELF_PLATFORM
1016
#define ELF_PLATFORM (NULL)
1017
#endif
1018

    
1019
#ifndef ELF_HWCAP
1020
#define ELF_HWCAP 0
1021
#endif
1022

    
1023
#ifdef TARGET_ABI32
1024
#undef ELF_CLASS
1025
#define ELF_CLASS ELFCLASS32
1026
#undef bswaptls
1027
#define bswaptls(ptr) bswap32s(ptr)
1028
#endif
1029

    
1030
#include "elf.h"
1031

    
1032
struct exec
1033
{
1034
    unsigned int a_info;   /* Use macros N_MAGIC, etc for access */
1035
    unsigned int a_text;   /* length of text, in bytes */
1036
    unsigned int a_data;   /* length of data, in bytes */
1037
    unsigned int a_bss;    /* length of uninitialized data area, in bytes */
1038
    unsigned int a_syms;   /* length of symbol table data in file, in bytes */
1039
    unsigned int a_entry;  /* start address */
1040
    unsigned int a_trsize; /* length of relocation info for text, in bytes */
1041
    unsigned int a_drsize; /* length of relocation info for data, in bytes */
1042
};
1043

    
1044

    
1045
#define N_MAGIC(exec) ((exec).a_info & 0xffff)
1046
#define OMAGIC 0407
1047
#define NMAGIC 0410
1048
#define ZMAGIC 0413
1049
#define QMAGIC 0314
1050

    
1051
/* Necessary parameters */
1052
#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
1053
#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
1054
#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
1055

    
1056
#define DLINFO_ITEMS 14 /* must match the NEW_AUX_ENT list in create_elf_tables() */
1057

    
1058
static inline void memcpy_fromfs(void * to, const void * from, unsigned long n)
1059
{
1060
    memcpy(to, from, n);
1061
}
1062

    
1063
#ifdef BSWAP_NEEDED
1064
static void bswap_ehdr(struct elfhdr *ehdr)
1065
{
1066
    bswap16s(&ehdr->e_type);            /* Object file type */
1067
    bswap16s(&ehdr->e_machine);         /* Architecture */
1068
    bswap32s(&ehdr->e_version);         /* Object file version */
1069
    bswaptls(&ehdr->e_entry);           /* Entry point virtual address */
1070
    bswaptls(&ehdr->e_phoff);           /* Program header table file offset */
1071
    bswaptls(&ehdr->e_shoff);           /* Section header table file offset */
1072
    bswap32s(&ehdr->e_flags);           /* Processor-specific flags */
1073
    bswap16s(&ehdr->e_ehsize);          /* ELF header size in bytes */
1074
    bswap16s(&ehdr->e_phentsize);       /* Program header table entry size */
1075
    bswap16s(&ehdr->e_phnum);           /* Program header table entry count */
1076
    bswap16s(&ehdr->e_shentsize);       /* Section header table entry size */
1077
    bswap16s(&ehdr->e_shnum);           /* Section header table entry count */
1078
    bswap16s(&ehdr->e_shstrndx);        /* Section header string table index */
1079
}
1080

    
1081
static void bswap_phdr(struct elf_phdr *phdr, int phnum)
1082
{
1083
    int i;
1084
    for (i = 0; i < phnum; ++i, ++phdr) {
1085
        bswap32s(&phdr->p_type);        /* Segment type */
1086
        bswap32s(&phdr->p_flags);       /* Segment flags */
1087
        bswaptls(&phdr->p_offset);      /* Segment file offset */
1088
        bswaptls(&phdr->p_vaddr);       /* Segment virtual address */
1089
        bswaptls(&phdr->p_paddr);       /* Segment physical address */
1090
        bswaptls(&phdr->p_filesz);      /* Segment size in file */
1091
        bswaptls(&phdr->p_memsz);       /* Segment size in memory */
1092
        bswaptls(&phdr->p_align);       /* Segment alignment */
1093
    }
1094
}
1095

    
1096
static void bswap_shdr(struct elf_shdr *shdr, int shnum)
1097
{
1098
    int i;
1099
    for (i = 0; i < shnum; ++i, ++shdr) {
1100
        bswap32s(&shdr->sh_name);
1101
        bswap32s(&shdr->sh_type);
1102
        bswaptls(&shdr->sh_flags);
1103
        bswaptls(&shdr->sh_addr);
1104
        bswaptls(&shdr->sh_offset);
1105
        bswaptls(&shdr->sh_size);
1106
        bswap32s(&shdr->sh_link);
1107
        bswap32s(&shdr->sh_info);
1108
        bswaptls(&shdr->sh_addralign);
1109
        bswaptls(&shdr->sh_entsize);
1110
    }
1111
}
1112

    
1113
static void bswap_sym(struct elf_sym *sym)
1114
{
1115
    bswap32s(&sym->st_name);
1116
    bswaptls(&sym->st_value);
1117
    bswaptls(&sym->st_size);
1118
    bswap16s(&sym->st_shndx);
1119
}
1120
#else
1121
static inline void bswap_ehdr(struct elfhdr *ehdr) { }
1122
static inline void bswap_phdr(struct elf_phdr *phdr, int phnum) { }
1123
static inline void bswap_shdr(struct elf_shdr *shdr, int shnum) { }
1124
static inline void bswap_sym(struct elf_sym *sym) { }
1125
#endif
1126

    
1127
#ifdef USE_ELF_CORE_DUMP
1128
static int elf_core_dump(int, const CPUArchState *);
1129
#endif /* USE_ELF_CORE_DUMP */
1130
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias);
1131

    
1132
/* Verify the portions of EHDR within E_IDENT for the target.
1133
   This can be performed before bswapping the entire header.  */
1134
static bool elf_check_ident(struct elfhdr *ehdr)
1135
{
1136
    return (ehdr->e_ident[EI_MAG0] == ELFMAG0
1137
            && ehdr->e_ident[EI_MAG1] == ELFMAG1
1138
            && ehdr->e_ident[EI_MAG2] == ELFMAG2
1139
            && ehdr->e_ident[EI_MAG3] == ELFMAG3
1140
            && ehdr->e_ident[EI_CLASS] == ELF_CLASS
1141
            && ehdr->e_ident[EI_DATA] == ELF_DATA
1142
            && ehdr->e_ident[EI_VERSION] == EV_CURRENT);
1143
}
1144

    
1145
/* Verify the portions of EHDR outside of E_IDENT for the target.
1146
   This has to wait until after bswapping the header.  */
1147
static bool elf_check_ehdr(struct elfhdr *ehdr)
1148
{
1149
    return (elf_check_arch(ehdr->e_machine)
1150
            && ehdr->e_ehsize == sizeof(struct elfhdr)
1151
            && ehdr->e_phentsize == sizeof(struct elf_phdr)
1152
            && ehdr->e_shentsize == sizeof(struct elf_shdr)
1153
            && (ehdr->e_type == ET_EXEC || ehdr->e_type == ET_DYN));
1154
}
1155

    
1156
/*
1157
 * 'copy_elf_strings()' copies argument/environment strings from user
1158
 * memory to free pages in kernel mem. These are in a format ready
1159
 * to be put directly into the top of new user memory.
1160
 *
1161
 */
1162
static abi_ulong copy_elf_strings(int argc,char ** argv, void **page,
1163
                                  abi_ulong p)
1164
{
1165
    char *tmp, *tmp1, *pag = NULL;
1166
    int len, offset = 0;
1167

    
1168
    if (!p) {
1169
        return 0;       /* bullet-proofing */
1170
    }
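    /* Strings are copied backwards: p starts at the top of the argument
       area and moves down, allocating page buffers on demand. */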
1171
    while (argc-- > 0) {
1172
        tmp = argv[argc];
1173
        if (!tmp) {
1174
            fprintf(stderr, "VFS: argc is wrong");
1175
            exit(-1);
1176
        }
1177
        tmp1 = tmp;
1178
        while (*tmp++);
1179
        len = tmp - tmp1;
1180
        if (p < len) {  /* this shouldn't happen - 128kB */
1181
            return 0;
1182
        }
1183
        while (len) {
1184
            --p; --tmp; --len;
1185
            if (--offset < 0) {
1186
                offset = p % TARGET_PAGE_SIZE;
1187
                pag = (char *)page[p/TARGET_PAGE_SIZE];
1188
                if (!pag) {
1189
                    pag = g_try_malloc0(TARGET_PAGE_SIZE);
1190
                    page[p/TARGET_PAGE_SIZE] = pag;
1191
                    if (!pag)
1192
                        return 0;
1193
                }
1194
            }
1195
            if (len == 0 || offset == 0) {
1196
                *(pag + offset) = *tmp;
1197
            }
1198
            else {
1199
                int bytes_to_copy = (len > offset) ? offset : len;
1200
                tmp -= bytes_to_copy;
1201
                p -= bytes_to_copy;
1202
                offset -= bytes_to_copy;
1203
                len -= bytes_to_copy;
1204
                memcpy_fromfs(pag + offset, tmp, bytes_to_copy + 1);
1205
            }
1206
        }
1207
    }
1208
    return p;
1209
}
1210

    
1211
static abi_ulong setup_arg_pages(abi_ulong p, struct linux_binprm *bprm,
1212
                                 struct image_info *info)
1213
{
1214
    abi_ulong stack_base, size, error, guard;
1215
    int i;
1216

    
1217
    /* Create enough stack to hold everything.  If we don't use
1218
       it for args, we'll use it for something else.  */
1219
    size = guest_stack_size;
1220
    if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE) {
1221
        size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1222
    }
1223
    guard = TARGET_PAGE_SIZE;
1224
    if (guard < qemu_real_host_page_size) {
1225
        guard = qemu_real_host_page_size;
1226
    }
1227

    
1228
    error = target_mmap(0, size + guard, PROT_READ | PROT_WRITE,
1229
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1230
    if (error == -1) {
1231
        perror("mmap stack");
1232
        exit(-1);
1233
    }
1234

    
1235
    /* We reserve one extra page at the top of the stack as guard.  */
1236
    target_mprotect(error, guard, PROT_NONE);
1237

    
1238
    info->stack_limit = error + guard;
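    /* The copied argument pages occupy the top MAX_ARG_PAGES pages of the new stack. */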
1239
    stack_base = info->stack_limit + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
1240
    p += stack_base;
1241

    
1242
    for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
1243
        if (bprm->page[i]) {
1244
            info->rss++;
1245
            /* FIXME - check return value of memcpy_to_target() for failure */
1246
            memcpy_to_target(stack_base, bprm->page[i], TARGET_PAGE_SIZE);
1247
            g_free(bprm->page[i]);
1248
        }
1249
        stack_base += TARGET_PAGE_SIZE;
1250
    }
1251
    return p;
1252
}
1253

    
1254
/* Map and zero the bss.  We need to explicitly zero any fractional pages
1255
   after the data section (i.e. bss).  */
1256
static void zero_bss(abi_ulong elf_bss, abi_ulong last_bss, int prot)
1257
{
1258
    uintptr_t host_start, host_map_start, host_end;
1259

    
1260
    last_bss = TARGET_PAGE_ALIGN(last_bss);
1261

    
1262
    /* ??? There is confusion between qemu_real_host_page_size and
1263
       qemu_host_page_size here and elsewhere in target_mmap, which
1264
       may lead to the end of the data section mapping from the file
1265
       not being mapped.  At least there was an explicit test and
1266
       comment for that here, suggesting that "the file size must
1267
       be known".  The comment probably pre-dates the introduction
1268
       of the fstat system call in target_mmap which does in fact
1269
       find out the size.  What isn't clear is if the workaround
1270
       here is still actually needed.  For now, continue with it,
1271
       but merge it with the "normal" mmap that would allocate the bss.  */
1272

    
1273
    host_start = (uintptr_t) g2h(elf_bss);
1274
    host_end = (uintptr_t) g2h(last_bss);
1275
    host_map_start = (host_start + qemu_real_host_page_size - 1);
1276
    host_map_start &= -qemu_real_host_page_size;
1277

    
1278
    if (host_map_start < host_end) {
1279
        void *p = mmap((void *)host_map_start, host_end - host_map_start,
1280
                       prot, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
1281
        if (p == MAP_FAILED) {
1282
            perror("cannot mmap brk");
1283
            exit(-1);
1284
        }
1285

    
1286
        /* Since we didn't use target_mmap, make sure to record
1287
           the validity of the pages with qemu.  */
1288
        page_set_flags(elf_bss & TARGET_PAGE_MASK, last_bss, prot|PAGE_VALID);
1289
    }
1290

    
1291
    if (host_start < host_map_start) {
1292
        memset((void *)host_start, 0, host_map_start - host_start);
1293
    }
1294
}
1295

    
1296
#ifdef CONFIG_USE_FDPIC
1297
static abi_ulong loader_build_fdpic_loadmap(struct image_info *info, abi_ulong sp)
1298
{
1299
    uint16_t n;
1300
    struct elf32_fdpic_loadseg *loadsegs = info->loadsegs;
1301

    
1302
    /* elf32_fdpic_loadseg */
1303
    n = info->nsegs;
1304
    while (n--) {
1305
        sp -= 12;
1306
        put_user_u32(loadsegs[n].addr, sp+0);
1307
        put_user_u32(loadsegs[n].p_vaddr, sp+4);
1308
        put_user_u32(loadsegs[n].p_memsz, sp+8);
1309
    }
1310

    
1311
    /* elf32_fdpic_loadmap */
1312
    sp -= 4;
1313
    put_user_u16(0, sp+0); /* version */
1314
    put_user_u16(info->nsegs, sp+2); /* nsegs */
1315

    
1316
    info->personality = PER_LINUX_FDPIC;
1317
    info->loadmap_addr = sp;
1318

    
1319
    return sp;
1320
}
1321
#endif
1322

    
1323
static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc,
1324
                                   struct elfhdr *exec,
1325
                                   struct image_info *info,
1326
                                   struct image_info *interp_info)
1327
{
1328
    abi_ulong sp;
1329
    abi_ulong sp_auxv;
1330
    int size;
1331
    int i;
1332
    abi_ulong u_rand_bytes;
1333
    uint8_t k_rand_bytes[16];
1334
    abi_ulong u_platform;
1335
    const char *k_platform;
1336
    const int n = sizeof(elf_addr_t);
1337

    
1338
    sp = p;
1339

    
1340
#ifdef CONFIG_USE_FDPIC
1341
    /* Needs to be before we load the env/argc/... */
1342
    if (elf_is_fdpic(exec)) {
1343
        /* Need 4 byte alignment for these structs */
1344
        sp &= ~3;
1345
        sp = loader_build_fdpic_loadmap(info, sp);
1346
        info->other_info = interp_info;
1347
        if (interp_info) {
1348
            interp_info->other_info = info;
1349
            sp = loader_build_fdpic_loadmap(interp_info, sp);
1350
        }
1351
    }
1352
#endif
1353

    
1354
    u_platform = 0;
1355
    k_platform = ELF_PLATFORM;
1356
    if (k_platform) {
1357
        size_t len = strlen(k_platform) + 1;
1358
        sp -= (len + n - 1) & ~(n - 1);
1359
        u_platform = sp;
1360
        /* FIXME - check return value of memcpy_to_target() for failure */
1361
        memcpy_to_target(sp, k_platform, len);
1362
    }
1363

    
1364
    /*
1365
     * Generate 16 random bytes for userspace PRNG seeding (not
1366
     * cryptographically secure, but that is not the aim of QEMU).
1367
     */
1368
    srand((unsigned int) time(NULL));
1369
    for (i = 0; i < 16; i++) {
1370
        k_rand_bytes[i] = rand();
1371
    }
1372
    sp -= 16;
1373
    u_rand_bytes = sp;
1374
    /* FIXME - check return value of memcpy_to_target() for failure */
1375
    memcpy_to_target(sp, k_rand_bytes, 16);
1376

    
1377
    /*
1378
     * Force 16 byte _final_ alignment here for generality.
1379
     */
1380
    sp = sp &~ (abi_ulong)15;
1381
    size = (DLINFO_ITEMS + 1) * 2;
1382
    if (k_platform)
1383
        size += 2;
1384
#ifdef DLINFO_ARCH_ITEMS
1385
    size += DLINFO_ARCH_ITEMS * 2;
1386
#endif
1387
    size += envc + argc + 2;
1388
    size += 1;  /* argc itself */
1389
    size *= n;
1390
    if (size & 15)
1391
        sp -= 16 - (size & 15);
1392

    
1393
    /* This is correct because Linux defines
1394
     * elf_addr_t as Elf32_Off / Elf64_Off
1395
     */
1396
#define NEW_AUX_ENT(id, val) do {               \
1397
        sp -= n; put_user_ual(val, sp);         \
1398
        sp -= n; put_user_ual(id, sp);          \
1399
    } while(0)
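    /* Entries are pushed downwards, so the AT_NULL pushed first ends up at
       the highest address and terminates the vector. */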
1400

    
1401
    sp_auxv = sp;
1402
    NEW_AUX_ENT (AT_NULL, 0);
1403

    
1404
    /* There must be exactly DLINFO_ITEMS entries here.  */
1405
    NEW_AUX_ENT(AT_PHDR, (abi_ulong)(info->load_addr + exec->e_phoff));
1406
    NEW_AUX_ENT(AT_PHENT, (abi_ulong)(sizeof (struct elf_phdr)));
1407
    NEW_AUX_ENT(AT_PHNUM, (abi_ulong)(exec->e_phnum));
1408
    NEW_AUX_ENT(AT_PAGESZ, (abi_ulong)(TARGET_PAGE_SIZE));
1409
    NEW_AUX_ENT(AT_BASE, (abi_ulong)(interp_info ? interp_info->load_addr : 0));
1410
    NEW_AUX_ENT(AT_FLAGS, (abi_ulong)0);
1411
    NEW_AUX_ENT(AT_ENTRY, info->entry);
1412
    NEW_AUX_ENT(AT_UID, (abi_ulong) getuid());
1413
    NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid());
1414
    NEW_AUX_ENT(AT_GID, (abi_ulong) getgid());
1415
    NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid());
1416
    NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP);
1417
    NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK));
1418
    NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes);
1419

    
1420
    if (k_platform)
1421
        NEW_AUX_ENT(AT_PLATFORM, u_platform);
1422
#ifdef ARCH_DLINFO
1423
    /*
1424
     * ARCH_DLINFO must come last so platform specific code can enforce
1425
     * special alignment requirements on the AUXV if necessary (eg. PPC).
1426
     */
1427
    ARCH_DLINFO;
1428
#endif
1429
#undef NEW_AUX_ENT
1430

    
1431
    info->saved_auxv = sp;
1432
    info->auxv_len = sp_auxv - sp;
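    /* loader_build_argptr() pushes the envp and argv pointer arrays and argc
       below the auxv, giving the usual start-up layout:
       argc, argv[]...NULL, envp[]...NULL, auxv. */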
1433

    
1434
    sp = loader_build_argptr(envc, argc, sp, p, 0);
1435
    return sp;
1436
}
1437

    
1438
#ifndef TARGET_HAS_VALIDATE_GUEST_SPACE
1439
/* If the guest doesn't have a validation function just agree */
1440
static int validate_guest_space(unsigned long guest_base,
1441
                                unsigned long guest_size)
1442
{
1443
    return 1;
1444
}
1445
#endif
1446

    
1447
unsigned long init_guest_space(unsigned long host_start,
1448
                               unsigned long host_size,
1449
                               unsigned long guest_start,
1450
                               bool fixed)
1451
{
1452
    unsigned long current_start, real_start;
1453
    int flags;
1454

    
1455
    assert(host_start || host_size);
1456

    
1457
    /* If just a starting address is given, then just verify that
1458
     * address.  */
1459
    if (host_start && !host_size) {
1460
        if (validate_guest_space(host_start, host_size) == 1) {
1461
            return host_start;
1462
        } else {
1463
            return (unsigned long)-1;
1464
        }
1465
    }
1466

    
1467
    /* Setup the initial flags and start address.  */
1468
    current_start = host_start & qemu_host_page_mask;
1469
    flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
1470
    if (fixed) {
1471
        flags |= MAP_FIXED;
1472
    }
1473

    
1474
    /* Otherwise, a non-zero size region of memory needs to be mapped
1475
     * and validated.  */
1476
    while (1) {
1477
        unsigned long real_size = host_size;
1478

    
1479
        /* Do not use mmap_find_vma here because that is limited to the
1480
         * guest address space.  We are going to make the
1481
         * guest address space fit whatever we're given.
1482
         */
1483
        real_start = (unsigned long)
1484
            mmap((void *)current_start, host_size, PROT_NONE, flags, -1, 0);
1485
        if (real_start == (unsigned long)-1) {
1486
            return (unsigned long)-1;
1487
        }
1488

    
1489
        /* Ensure the address is properly aligned.  */
1490
        if (real_start & ~qemu_host_page_mask) {
1491
            munmap((void *)real_start, host_size);
1492
            real_size = host_size + qemu_host_page_size;
1493
            real_start = (unsigned long)
1494
                mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
1495
            if (real_start == (unsigned long)-1) {
1496
                return (unsigned long)-1;
1497
            }
1498
            real_start = HOST_PAGE_ALIGN(real_start);
1499
        }
1500

    
1501
        /* Check to see if the address is valid.  */
1502
        if (!host_start || real_start == current_start) {
1503
            int valid = validate_guest_space(real_start - guest_start,
1504
                                             real_size);
1505
            if (valid == 1) {
1506
                break;
1507
            } else if (valid == -1) {
1508
                return (unsigned long)-1;
1509
            }
1510
            /* valid == 0, so try again. */
1511
        }
1512

    
1513
        /* That address didn't work.  Unmap and try a different one.
1514
         * The address the host picked is typically right at
1515
         * the top of the host address space and leaves the guest with
1516
         * no usable address space.  Resort to a linear search.  We
1517
         * already compensated for mmap_min_addr, so this should not
1518
         * happen often.  Probably means we got unlucky and host
1519
         * address space randomization put a shared library somewhere
1520
         * inconvenient.
1521
         */
1522
        munmap((void *)real_start, host_size);
1523
        current_start += qemu_host_page_size;
1524
        if (host_start == current_start) {
1525
            /* Theoretically possible if host doesn't have any suitably
1526
             * aligned areas.  Normally the first mmap will fail.
1527
             */
1528
            return (unsigned long)-1;
1529
        }
1530
    }
1531

    
1532
    qemu_log("Reserved 0x%lx bytes of guest address space\n", host_size);
1533

    
1534
    return real_start;
1535
}
1536

    
1537
static void probe_guest_base(const char *image_name,
1538
                             abi_ulong loaddr, abi_ulong hiaddr)
1539
{
1540
    /* Probe for a suitable guest base address, if the user has not set
1541
     * it explicitly, and set guest_base appropriately.
1542
     * In case of error we will print a suitable message and exit.
1543
     */
1544
#if defined(CONFIG_USE_GUEST_BASE)
1545
    const char *errmsg;
1546
    if (!have_guest_base && !reserved_va) {
1547
        unsigned long host_start, real_start, host_size;
1548

    
1549
        /* Round addresses to page boundaries.  */
1550
        loaddr &= qemu_host_page_mask;
1551
        hiaddr = HOST_PAGE_ALIGN(hiaddr);
1552

    
1553
        if (loaddr < mmap_min_addr) {
1554
            host_start = HOST_PAGE_ALIGN(mmap_min_addr);
1555
        } else {
1556
            host_start = loaddr;
1557
            if (host_start != loaddr) {
1558
                errmsg = "Address overflow loading ELF binary";
1559
                goto exit_errmsg;
1560
            }
1561
        }
1562
        host_size = hiaddr - loaddr;
1563

    
1564
        /* Setup the initial guest memory space with ranges gleaned from
1565
         * the ELF image that is being loaded.
1566
         */
1567
        real_start = init_guest_space(host_start, host_size, loaddr, false);
1568
        if (real_start == (unsigned long)-1) {
1569
            errmsg = "Unable to find space for application";
1570
            goto exit_errmsg;
1571
        }
1572
        guest_base = real_start - loaddr;
1573

    
1574
        qemu_log("Relocating guest address space from 0x"
1575
                 TARGET_ABI_FMT_lx " to 0x%lx\n",
1576
                 loaddr, real_start);
1577
    }
1578
    return;
1579

    
1580
exit_errmsg:
1581
    fprintf(stderr, "%s: %s\n", image_name, errmsg);
1582
    exit(-1);
1583
#endif
1584
}
1585

    
1586

    
1587
/* Load an ELF image into the address space.
1588

1589
   IMAGE_NAME is the filename of the image, to use in error messages.
1590
   IMAGE_FD is the open file descriptor for the image.
1591

1592
   BPRM_BUF is a copy of the beginning of the file; this of course
1593
   contains the elf file header at offset 0.  It is assumed that this
1594
   buffer is sufficiently aligned to present no problems to the host
1595
   in accessing data at aligned offsets within the buffer.
1596

1597
   On return: INFO values will be filled in, as necessary or available.  */
1598

    
1599
static void load_elf_image(const char *image_name, int image_fd,
1600
                           struct image_info *info, char **pinterp_name,
1601
                           char bprm_buf[BPRM_BUF_SIZE])
1602
{
1603
    struct elfhdr *ehdr = (struct elfhdr *)bprm_buf;
1604
    struct elf_phdr *phdr;
1605
    abi_ulong load_addr, load_bias, loaddr, hiaddr, error;
1606
    int i, retval;
1607
    const char *errmsg;
1608

    
1609
    /* First of all, some simple consistency checks */
1610
    errmsg = "Invalid ELF image for this architecture";
1611
    if (!elf_check_ident(ehdr)) {
1612
        goto exit_errmsg;
1613
    }
1614
    bswap_ehdr(ehdr);
1615
    if (!elf_check_ehdr(ehdr)) {
1616
        goto exit_errmsg;
1617
    }
1618

    
1619
    i = ehdr->e_phnum * sizeof(struct elf_phdr);
1620
    if (ehdr->e_phoff + i <= BPRM_BUF_SIZE) {
1621
        phdr = (struct elf_phdr *)(bprm_buf + ehdr->e_phoff);
1622
    } else {
1623
        phdr = (struct elf_phdr *) alloca(i);
1624
        retval = pread(image_fd, phdr, i, ehdr->e_phoff);
1625
        if (retval != i) {
1626
            goto exit_read;
1627
        }
1628
    }
1629
    bswap_phdr(phdr, ehdr->e_phnum);
1630

    
1631
#ifdef CONFIG_USE_FDPIC
1632
    info->nsegs = 0;
1633
    info->pt_dynamic_addr = 0;
1634
#endif
1635

    
1636
    /* Find the maximum size of the image and allocate an appropriate
1637
       amount of memory to handle that.  */
1638
    loaddr = -1, hiaddr = 0;
1639
    for (i = 0; i < ehdr->e_phnum; ++i) {
1640
        if (phdr[i].p_type == PT_LOAD) {
1641
            abi_ulong a = phdr[i].p_vaddr;
1642
            if (a < loaddr) {
1643
                loaddr = a;
1644
            }
1645
            a += phdr[i].p_memsz;
1646
            if (a > hiaddr) {
1647
                hiaddr = a;
1648
            }
1649
#ifdef CONFIG_USE_FDPIC
1650
            ++info->nsegs;
1651
#endif
1652
        }
1653
    }
1654

    
1655
    load_addr = loaddr;
1656
    if (ehdr->e_type == ET_DYN) {
1657
        /* The image indicates that it can be loaded anywhere.  Find a
1658
           location that can hold the memory space required.  If the
1659
           image is pre-linked, LOADDR will be non-zero.  Since we do
1660
           not supply MAP_FIXED here we'll use that address if and
1661
           only if it remains available.  */
1662
        load_addr = target_mmap(loaddr, hiaddr - loaddr, PROT_NONE,
1663
                                MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
1664
                                -1, 0);
1665
        if (load_addr == -1) {
1666
            goto exit_perror;
1667
        }
1668
    } else if (pinterp_name != NULL) {
1669
        /* This is the main executable.  Make sure that the low
1670
           address does not conflict with MMAP_MIN_ADDR or the
1671
           QEMU application itself.  */
1672
        probe_guest_base(image_name, loaddr, hiaddr);
1673
    }
1674
    load_bias = load_addr - loaddr;
1675

    
1676
#ifdef CONFIG_USE_FDPIC
1677
    {
1678
        struct elf32_fdpic_loadseg *loadsegs = info->loadsegs =
1679
            g_malloc(sizeof(*loadsegs) * info->nsegs);
1680

    
1681
        for (i = 0; i < ehdr->e_phnum; ++i) {
1682
            switch (phdr[i].p_type) {
1683
            case PT_DYNAMIC:
1684
                info->pt_dynamic_addr = phdr[i].p_vaddr + load_bias;
1685
                break;
1686
            case PT_LOAD:
1687
                loadsegs->addr = phdr[i].p_vaddr + load_bias;
1688
                loadsegs->p_vaddr = phdr[i].p_vaddr;
1689
                loadsegs->p_memsz = phdr[i].p_memsz;
1690
                ++loadsegs;
1691
                break;
1692
            }
1693
        }
1694
    }
1695
#endif
1696

    
1697
    info->load_bias = load_bias;
1698
    info->load_addr = load_addr;
1699
    info->entry = ehdr->e_entry + load_bias;
1700
    info->start_code = -1;
1701
    info->end_code = 0;
1702
    info->start_data = -1;
1703
    info->end_data = 0;
1704
    info->brk = 0;
1705
    info->elf_flags = ehdr->e_flags;
1706

    
1707
    for (i = 0; i < ehdr->e_phnum; i++) {
1708
        struct elf_phdr *eppnt = phdr + i;
1709
        if (eppnt->p_type == PT_LOAD) {
1710
            abi_ulong vaddr, vaddr_po, vaddr_ps, vaddr_ef, vaddr_em;
1711
            int elf_prot = 0;
1712

    
1713
            if (eppnt->p_flags & PF_R) elf_prot =  PROT_READ;
1714
            if (eppnt->p_flags & PF_W) elf_prot |= PROT_WRITE;
1715
            if (eppnt->p_flags & PF_X) elf_prot |= PROT_EXEC;
1716

    
1717
            vaddr = load_bias + eppnt->p_vaddr;
1718
            vaddr_po = TARGET_ELF_PAGEOFFSET(vaddr);
1719
            vaddr_ps = TARGET_ELF_PAGESTART(vaddr);
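            /* Illustrative arithmetic: with a 4 KiB ELF page size and
             * vaddr == 0x10234, vaddr_po is 0x234 and vaddr_ps is 0x10000,
             * so the mmap below starts at the page boundary 0x10000, maps
             * p_filesz + 0x234 bytes, and reads from file offset
             * p_offset - 0x234, leaving the segment's first byte at vaddr.
             */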
1720

    
1721
            error = target_mmap(vaddr_ps, eppnt->p_filesz + vaddr_po,
1722
                                elf_prot, MAP_PRIVATE | MAP_FIXED,
1723
                                image_fd, eppnt->p_offset - vaddr_po);
1724
            if (error == -1) {
1725
                goto exit_perror;
1726
            }
1727

    
1728
            vaddr_ef = vaddr + eppnt->p_filesz;
1729
            vaddr_em = vaddr + eppnt->p_memsz;
1730

    
1731
            /* If the load segment requests extra zeros (e.g. bss), map it.  */
1732
            if (vaddr_ef < vaddr_em) {
1733
                zero_bss(vaddr_ef, vaddr_em, elf_prot);
1734
            }
1735

    
1736
            /* Find the full program boundaries.  */
1737
            if (elf_prot & PROT_EXEC) {
1738
                if (vaddr < info->start_code) {
1739
                    info->start_code = vaddr;
1740
                }
1741
                if (vaddr_ef > info->end_code) {
1742
                    info->end_code = vaddr_ef;
1743
                }
1744
            }
1745
            if (elf_prot & PROT_WRITE) {
1746
                if (vaddr < info->start_data) {
1747
                    info->start_data = vaddr;
1748
                }
1749
                if (vaddr_ef > info->end_data) {
1750
                    info->end_data = vaddr_ef;
1751
                }
1752
                if (vaddr_em > info->brk) {
1753
                    info->brk = vaddr_em;
1754
                }
1755
            }
1756
        } else if (eppnt->p_type == PT_INTERP && pinterp_name) {
1757
            char *interp_name;
1758

    
1759
            if (*pinterp_name) {
1760
                errmsg = "Multiple PT_INTERP entries";
1761
                goto exit_errmsg;
1762
            }
1763
            interp_name = malloc(eppnt->p_filesz);
1764
            if (!interp_name) {
1765
                goto exit_perror;
1766
            }
1767

    
1768
            if (eppnt->p_offset + eppnt->p_filesz <= BPRM_BUF_SIZE) {
1769
                memcpy(interp_name, bprm_buf + eppnt->p_offset,
1770
                       eppnt->p_filesz);
1771
            } else {
1772
                retval = pread(image_fd, interp_name, eppnt->p_filesz,
1773
                               eppnt->p_offset);
1774
                if (retval != eppnt->p_filesz) {
1775
                    goto exit_perror;
1776
                }
1777
            }
1778
            if (interp_name[eppnt->p_filesz - 1] != 0) {
1779
                errmsg = "Invalid PT_INTERP entry";
1780
                goto exit_errmsg;
1781
            }
1782
            *pinterp_name = interp_name;
1783
        }
1784
    }
1785

    
1786
    if (info->end_data == 0) {
1787
        info->start_data = info->end_code;
1788
        info->end_data = info->end_code;
1789
        info->brk = info->end_code;
1790
    }
1791

    
1792
    if (qemu_log_enabled()) {
1793
        load_symbols(ehdr, image_fd, load_bias);
1794
    }
1795

    
1796
    close(image_fd);
1797
    return;
1798

    
1799
 exit_read:
1800
    if (retval >= 0) {
1801
        errmsg = "Incomplete read of file header";
1802
        goto exit_errmsg;
1803
    }
1804
 exit_perror:
1805
    errmsg = strerror(errno);
1806
 exit_errmsg:
1807
    fprintf(stderr, "%s: %s\n", image_name, errmsg);
1808
    exit(-1);
1809
}
1810

    
1811
static void load_elf_interp(const char *filename, struct image_info *info,
1812
                            char bprm_buf[BPRM_BUF_SIZE])
1813
{
1814
    int fd, retval;
1815

    
1816
    fd = open(path(filename), O_RDONLY);
1817
    if (fd < 0) {
1818
        goto exit_perror;
1819
    }
1820

    
1821
    retval = read(fd, bprm_buf, BPRM_BUF_SIZE);
1822
    if (retval < 0) {
1823
        goto exit_perror;
1824
    }
1825
    if (retval < BPRM_BUF_SIZE) {
1826
        memset(bprm_buf + retval, 0, BPRM_BUF_SIZE - retval);
1827
    }
1828

    
1829
    load_elf_image(filename, fd, info, NULL, bprm_buf);
1830
    return;
1831

    
1832
 exit_perror:
1833
    fprintf(stderr, "%s: %s\n", filename, strerror(errno));
1834
    exit(-1);
1835
}
1836

    
1837
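/* bsearch() comparator used by lookup_symbolxx() below: the key is a guest
 * address, and a symbol matches when that address falls within
 * [st_value, st_value + st_size).
 */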
static int symfind(const void *s0, const void *s1)
1838
{
1839
    target_ulong addr = *(target_ulong *)s0;
1840
    struct elf_sym *sym = (struct elf_sym *)s1;
1841
    int result = 0;
1842
    if (addr < sym->st_value) {
1843
        result = -1;
1844
    } else if (addr >= sym->st_value + sym->st_size) {
1845
        result = 1;
1846
    }
1847
    return result;
1848
}
1849

    
1850
static const char *lookup_symbolxx(struct syminfo *s, target_ulong orig_addr)
1851
{
1852
#if ELF_CLASS == ELFCLASS32
1853
    struct elf_sym *syms = s->disas_symtab.elf32;
1854
#else
1855
    struct elf_sym *syms = s->disas_symtab.elf64;
1856
#endif
1857

    
1858
    /* binary search */
1859
    struct elf_sym *sym;
1860

    
1861
    sym = bsearch(&orig_addr, syms, s->disas_num_syms, sizeof(*syms), symfind);
1862
    if (sym != NULL) {
1863
        return s->disas_strtab + sym->st_name;
1864
    }
1865

    
1866
    return "";
1867
}
1868

    
1869
/* FIXME: This should use elf_ops.h  */
1870
static int symcmp(const void *s0, const void *s1)
1871
{
1872
    struct elf_sym *sym0 = (struct elf_sym *)s0;
1873
    struct elf_sym *sym1 = (struct elf_sym *)s1;
1874
    return (sym0->st_value < sym1->st_value)
1875
        ? -1
1876
        : ((sym0->st_value > sym1->st_value) ? 1 : 0);
1877
}
1878

    
1879
/* Best attempt to load symbols from this ELF object. */
1880
static void load_symbols(struct elfhdr *hdr, int fd, abi_ulong load_bias)
1881
{
1882
    int i, shnum, nsyms, sym_idx = 0, str_idx = 0;
1883
    struct elf_shdr *shdr;
1884
    char *strings = NULL;
1885
    struct syminfo *s = NULL;
1886
    struct elf_sym *new_syms, *syms = NULL;
1887

    
1888
    shnum = hdr->e_shnum;
1889
    i = shnum * sizeof(struct elf_shdr);
1890
    shdr = (struct elf_shdr *)alloca(i);
1891
    if (pread(fd, shdr, i, hdr->e_shoff) != i) {
1892
        return;
1893
    }
1894

    
1895
    bswap_shdr(shdr, shnum);
1896
    for (i = 0; i < shnum; ++i) {
1897
        if (shdr[i].sh_type == SHT_SYMTAB) {
1898
            sym_idx = i;
1899
            str_idx = shdr[i].sh_link;
1900
            goto found;
1901
        }
1902
    }
1903

    
1904
    /* There will be no symbol table if the file was stripped.  */
1905
    return;
1906

    
1907
 found:
1908
    /* Now we know where the strtab and symtab are.  Snarf them.  */
1909
    s = malloc(sizeof(*s));
1910
    if (!s) {
1911
        goto give_up;
1912
    }
1913

    
1914
    i = shdr[str_idx].sh_size;
1915
    s->disas_strtab = strings = malloc(i);
1916
    if (!strings || pread(fd, strings, i, shdr[str_idx].sh_offset) != i) {
1917
        goto give_up;
1918
    }
1919

    
1920
    i = shdr[sym_idx].sh_size;
1921
    syms = malloc(i);
1922
    if (!syms || pread(fd, syms, i, shdr[sym_idx].sh_offset) != i) {
1923
        goto give_up;
1924
    }
1925

    
1926
    nsyms = i / sizeof(struct elf_sym);
1927
    for (i = 0; i < nsyms; ) {
1928
        bswap_sym(syms + i);
1929
        /* Throw away entries which we do not need.  */
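        /* Compact in place: overwrite the discarded slot with the last
           entry; ordering is restored by the qsort() below.  */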
1930
        if (syms[i].st_shndx == SHN_UNDEF
1931
            || syms[i].st_shndx >= SHN_LORESERVE
1932
            || ELF_ST_TYPE(syms[i].st_info) != STT_FUNC) {
1933
            if (i < --nsyms) {
1934
                syms[i] = syms[nsyms];
1935
            }
1936
        } else {
1937
#if defined(TARGET_ARM) || defined (TARGET_MIPS)
1938
            /* The bottom address bit marks a Thumb or MIPS16 symbol.  */
1939
            syms[i].st_value &= ~(target_ulong)1;
1940
#endif
1941
            syms[i].st_value += load_bias;
1942
            i++;
1943
        }
1944
    }
1945

    
1946
    /* No "useful" symbol.  */
1947
    if (nsyms == 0) {
1948
        goto give_up;
1949
    }
1950

    
1951
    /* Attempt to free the storage associated with the local symbols
1952
       that we threw away.  Whether or not this has any effect on the
1953
       memory allocation depends on the malloc implementation and how
1954
       many symbols we managed to discard.  */
1955
    new_syms = realloc(syms, nsyms * sizeof(*syms));
1956
    if (new_syms == NULL) {
1957
        goto give_up;
1958
    }
1959
    syms = new_syms;
1960

    
1961
    qsort(syms, nsyms, sizeof(*syms), symcmp);
1962

    
1963
    s->disas_num_syms = nsyms;
1964
#if ELF_CLASS == ELFCLASS32
1965
    s->disas_symtab.elf32 = syms;
1966
#else
1967
    s->disas_symtab.elf64 = syms;
1968
#endif
1969
    s->lookup_symbol = lookup_symbolxx;
1970
    s->next = syminfos;
1971
    syminfos = s;
1972

    
1973
    return;
1974

    
1975
give_up:
1976
    free(s);
1977
    free(strings);
1978
    free(syms);
1979
}
1980

    
1981
int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * regs,
1982
                    struct image_info * info)
1983
{
1984
    struct image_info interp_info;
1985
    struct elfhdr elf_ex;
1986
    char *elf_interpreter = NULL;
1987

    
1988
    info->start_mmap = (abi_ulong)ELF_START_MMAP;
1989
    info->mmap = 0;
1990
    info->rss = 0;
1991

    
1992
    load_elf_image(bprm->filename, bprm->fd, info,
1993
                   &elf_interpreter, bprm->buf);
1994

    
1995
    /* ??? We need a copy of the elf header for passing to create_elf_tables.
1996
       If we do nothing, we'll have overwritten this when we re-use bprm->buf
1997
       when we load the interpreter.  */
1998
    elf_ex = *(struct elfhdr *)bprm->buf;
1999

    
2000
    bprm->p = copy_elf_strings(1, &bprm->filename, bprm->page, bprm->p);
2001
    bprm->p = copy_elf_strings(bprm->envc, bprm->envp, bprm->page, bprm->p);
2002
    bprm->p = copy_elf_strings(bprm->argc, bprm->argv, bprm->page, bprm->p);
2003
    if (!bprm->p) {
2004
        fprintf(stderr, "%s: %s\n", bprm->filename, strerror(E2BIG));
2005
        exit(-1);
2006
    }
2007

    
2008
    /* Do this so that we can load the interpreter, if need be.  We will
2009
       change some of these later */
2010
    bprm->p = setup_arg_pages(bprm->p, bprm, info);
2011

    
2012
    if (elf_interpreter) {
2013
        load_elf_interp(elf_interpreter, &interp_info, bprm->buf);
2014

    
2015
        /* If the program interpreter is one of these two, then assume
2016
           an iBCS2 image.  Otherwise assume a native linux image.  */
2017

    
2018
        if (strcmp(elf_interpreter, "/usr/lib/libc.so.1") == 0
2019
            || strcmp(elf_interpreter, "/usr/lib/ld.so.1") == 0) {
2020
            info->personality = PER_SVR4;
2021

    
2022
            /* Why this, you ask???  Well SVr4 maps page 0 as read-only,
2023
               and some applications "depend" upon this behavior.  Since
2024
               we do not have the power to recompile these, we emulate
2025
               the SVr4 behavior.  Sigh.  */
2026
            target_mmap(0, qemu_host_page_size, PROT_READ | PROT_EXEC,
2027
                        MAP_FIXED | MAP_PRIVATE, -1, 0);
2028
        }
2029
    }
2030

    
2031
    bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &elf_ex,
2032
                                info, (elf_interpreter ? &interp_info : NULL));
2033
    info->start_stack = bprm->p;
2034

    
2035
    /* If we have an interpreter, set that as the program's entry point.
2036
       Copy the load_bias as well, to help PPC64 interpret the entry
2037
       point as a function descriptor.  Do this after creating elf tables
2038
       so that we copy the original program entry point into the AUXV.  */
2039
    if (elf_interpreter) {
2040
        info->load_bias = interp_info.load_bias;
2041
        info->entry = interp_info.entry;
2042
        free(elf_interpreter);
2043
    }
2044

    
2045
#ifdef USE_ELF_CORE_DUMP
2046
    bprm->core_dump = &elf_core_dump;
2047
#endif
2048

    
2049
    return 0;
2050
}
2051

    
2052
#ifdef USE_ELF_CORE_DUMP
2053
/*
2054
 * Definitions to generate Intel SVR4-like core files.
2055
 * These mostly have the same names as the SVR4 types with "target_elf_"
2056
 * tacked on the front to prevent clashes with linux definitions,
2057
 * and the typedef forms have been avoided.  This is mostly like
2058
 * the SVR4 structure, but more Linuxy, with things that Linux does
2059
 * not support and which gdb doesn't really use excluded.
2060
 *
2061
 * Fields we don't dump (their contents are zero) in linux-user qemu
2062
 * are marked with XXX.
2063
 *
2064
 * Core dump code is copied from linux kernel (fs/binfmt_elf.c).
2065
 *
2066
 * Porting ELF coredump to a target is a (quite) simple process.  First you
2067
 * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for
2068
 * the target resides):
2069
 *
2070
 * #define USE_ELF_CORE_DUMP
2071
 *
2072
 * Next you define the type of register set used for dumping.  The ELF
2073
 * specification says it needs to be an array of elf_greg_t of size ELF_NREG.
2074
 *
2075
 * typedef <target_regtype> target_elf_greg_t;
2076
 * #define ELF_NREG <number of registers>
2077
 * typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
2078
 *
2079
 * The last step is to implement a target-specific function that copies
2080
 * the registers from the given cpu into that register set.  The prototype is:
2081
 *
2082
 * static void elf_core_copy_regs(target_elf_gregset_t *regs,
2083
 *                                const CPUArchState *env);
2084
 *
2085
 * Parameters:
2086
 *     regs - copy register values into here (allocated and zeroed by caller)
2087
 *     env - copy registers from here
2088
 *
2089
 * Example for ARM target is provided in this file.
2090
 */
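
/* For illustration only: a minimal sketch of the three steps above for a
 * hypothetical target with 16 general-purpose registers plus a program
 * counter.  The register names (env->regs[], env->pc) are invented; the ARM
 * implementation mentioned above is the real in-tree example.
 */
#if 0   /* illustrative sketch, not compiled */
#define USE_ELF_CORE_DUMP
#define ELF_NREG 17
typedef target_ulong target_elf_greg_t;
typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];

static void elf_core_copy_regs(target_elf_gregset_t *regs,
                               const CPUArchState *env)
{
    int i;

    /* Swap each value into target byte order; the note data is written
       to the core file verbatim.  */
    for (i = 0; i < 16; i++) {
        (*regs)[i] = tswapl(env->regs[i]);
    }
    (*regs)[16] = tswapl(env->pc);
}
#endif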
2091

    
2092
/* An ELF note in memory */
2093
struct memelfnote {
2094
    const char *name;
2095
    size_t     namesz;
2096
    size_t     namesz_rounded;
2097
    int        type;
2098
    size_t     datasz;
2099
    size_t     datasz_rounded;
2100
    void       *data;
2101
    size_t     notesz;
2102
};
2103

    
2104
struct target_elf_siginfo {
2105
    target_int  si_signo; /* signal number */
2106
    target_int  si_code;  /* extra code */
2107
    target_int  si_errno; /* errno */
2108
};
2109

    
2110
struct target_elf_prstatus {
2111
    struct target_elf_siginfo pr_info;      /* Info associated with signal */
2112
    target_short       pr_cursig;    /* Current signal */
2113
    target_ulong       pr_sigpend;   /* XXX */
2114
    target_ulong       pr_sighold;   /* XXX */
2115
    target_pid_t       pr_pid;
2116
    target_pid_t       pr_ppid;
2117
    target_pid_t       pr_pgrp;
2118
    target_pid_t       pr_sid;
2119
    struct target_timeval pr_utime;  /* XXX User time */
2120
    struct target_timeval pr_stime;  /* XXX System time */
2121
    struct target_timeval pr_cutime; /* XXX Cumulative user time */
2122
    struct target_timeval pr_cstime; /* XXX Cumulative system time */
2123
    target_elf_gregset_t      pr_reg;       /* GP registers */
2124
    target_int         pr_fpvalid;   /* XXX */
2125
};
2126

    
2127
#define ELF_PRARGSZ     (80) /* Number of chars for args */
2128

    
2129
struct target_elf_prpsinfo {
2130
    char         pr_state;       /* numeric process state */
2131
    char         pr_sname;       /* char for pr_state */
2132
    char         pr_zomb;        /* zombie */
2133
    char         pr_nice;        /* nice val */
2134
    target_ulong pr_flag;        /* flags */
2135
    target_uid_t pr_uid;
2136
    target_gid_t pr_gid;
2137
    target_pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
2138
    /* Lots missing */
2139
    char    pr_fname[16];           /* filename of executable */
2140
    char    pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
2141
};
2142

    
2143
/* Here is the structure in which status of each thread is captured. */
2144
struct elf_thread_status {
2145
    QTAILQ_ENTRY(elf_thread_status)  ets_link;
2146
    struct target_elf_prstatus prstatus;   /* NT_PRSTATUS */
2147
#if 0
2148
    elf_fpregset_t fpu;             /* NT_PRFPREG */
2149
    struct task_struct *thread;
2150
    elf_fpxregset_t xfpu;           /* ELF_CORE_XFPREG_TYPE */
2151
#endif
2152
    struct memelfnote notes[1];
2153
    int num_notes;
2154
};
2155

    
2156
struct elf_note_info {
2157
    struct memelfnote   *notes;
2158
    struct target_elf_prstatus *prstatus;  /* NT_PRSTATUS */
2159
    struct target_elf_prpsinfo *psinfo;    /* NT_PRPSINFO */
2160

    
2161
    QTAILQ_HEAD(thread_list_head, elf_thread_status) thread_list;
2162
#if 0
2163
    /*
2164
     * Current version of ELF coredump doesn't support
2165
     * dumping fp regs etc.
2166
     */
2167
    elf_fpregset_t *fpu;
2168
    elf_fpxregset_t *xfpu;
2169
    int thread_status_size;
2170
#endif
2171
    int notes_size;
2172
    int numnote;
2173
};
2174

    
2175
struct vm_area_struct {
2176
    abi_ulong   vma_start;  /* start vaddr of memory region */
2177
    abi_ulong   vma_end;    /* end vaddr of memory region */
2178
    abi_ulong   vma_flags;  /* protection etc. flags for the region */
2179
    QTAILQ_ENTRY(vm_area_struct) vma_link;
2180
};
2181

    
2182
struct mm_struct {
2183
    QTAILQ_HEAD(, vm_area_struct) mm_mmap;
2184
    int mm_count;           /* number of mappings */
2185
};
2186

    
2187
static struct mm_struct *vma_init(void);
2188
static void vma_delete(struct mm_struct *);
2189
static int vma_add_mapping(struct mm_struct *, abi_ulong,
2190
                           abi_ulong, abi_ulong);
2191
static int vma_get_mapping_count(const struct mm_struct *);
2192
static struct vm_area_struct *vma_first(const struct mm_struct *);
2193
static struct vm_area_struct *vma_next(struct vm_area_struct *);
2194
static abi_ulong vma_dump_size(const struct vm_area_struct *);
2195
static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2196
                      unsigned long flags);
2197

    
2198
static void fill_elf_header(struct elfhdr *, int, uint16_t, uint32_t);
2199
static void fill_note(struct memelfnote *, const char *, int,
2200
                      unsigned int, void *);
2201
static void fill_prstatus(struct target_elf_prstatus *, const TaskState *, int);
2202
static int fill_psinfo(struct target_elf_prpsinfo *, const TaskState *);
2203
static void fill_auxv_note(struct memelfnote *, const TaskState *);
2204
static void fill_elf_note_phdr(struct elf_phdr *, int, off_t);
2205
static size_t note_size(const struct memelfnote *);
2206
static void free_note_info(struct elf_note_info *);
2207
static int fill_note_info(struct elf_note_info *, long, const CPUArchState *);
2208
static void fill_thread_info(struct elf_note_info *, const CPUArchState *);
2209
static int core_dump_filename(const TaskState *, char *, size_t);
2210

    
2211
static int dump_write(int, const void *, size_t);
2212
static int write_note(struct memelfnote *, int);
2213
static int write_note_info(struct elf_note_info *, int);
2214

    
2215
#ifdef BSWAP_NEEDED
2216
static void bswap_prstatus(struct target_elf_prstatus *prstatus)
2217
{
2218
    prstatus->pr_info.si_signo = tswapl(prstatus->pr_info.si_signo);
2219
    prstatus->pr_info.si_code = tswapl(prstatus->pr_info.si_code);
2220
    prstatus->pr_info.si_errno = tswapl(prstatus->pr_info.si_errno);
2221
    prstatus->pr_cursig = tswap16(prstatus->pr_cursig);
2222
    prstatus->pr_sigpend = tswapl(prstatus->pr_sigpend);
2223
    prstatus->pr_sighold = tswapl(prstatus->pr_sighold);
2224
    prstatus->pr_pid = tswap32(prstatus->pr_pid);
2225
    prstatus->pr_ppid = tswap32(prstatus->pr_ppid);
2226
    prstatus->pr_pgrp = tswap32(prstatus->pr_pgrp);
2227
    prstatus->pr_sid = tswap32(prstatus->pr_sid);
2228
    /* cpu times are not filled, so we skip them */
2229
    /* regs should be in correct format already */
2230
    prstatus->pr_fpvalid = tswap32(prstatus->pr_fpvalid);
2231
}
2232

    
2233
static void bswap_psinfo(struct target_elf_prpsinfo *psinfo)
2234
{
2235
    psinfo->pr_flag = tswapl(psinfo->pr_flag);
2236
    psinfo->pr_uid = tswap16(psinfo->pr_uid);
2237
    psinfo->pr_gid = tswap16(psinfo->pr_gid);
2238
    psinfo->pr_pid = tswap32(psinfo->pr_pid);
2239
    psinfo->pr_ppid = tswap32(psinfo->pr_ppid);
2240
    psinfo->pr_pgrp = tswap32(psinfo->pr_pgrp);
2241
    psinfo->pr_sid = tswap32(psinfo->pr_sid);
2242
}
2243

    
2244
static void bswap_note(struct elf_note *en)
2245
{
2246
    bswap32s(&en->n_namesz);
2247
    bswap32s(&en->n_descsz);
2248
    bswap32s(&en->n_type);
2249
}
2250
#else
2251
static inline void bswap_prstatus(struct target_elf_prstatus *p) { }
2252
static inline void bswap_psinfo(struct target_elf_prpsinfo *p) {}
2253
static inline void bswap_note(struct elf_note *en) { }
2254
#endif /* BSWAP_NEEDED */
2255

    
2256
/*
2257
 * Minimal support for Linux memory regions.  These are needed
2258
 * when we are finding out exactly what memory belongs to the
2259
 * emulated process.  No locks are needed here, as long as the
2260
 * thread that received the signal is stopped.
2261
 */
2262

    
2263
static struct mm_struct *vma_init(void)
2264
{
2265
    struct mm_struct *mm;
2266

    
2267
    if ((mm = g_malloc(sizeof (*mm))) == NULL)
2268
        return (NULL);
2269

    
2270
    mm->mm_count = 0;
2271
    QTAILQ_INIT(&mm->mm_mmap);
2272

    
2273
    return (mm);
2274
}
2275

    
2276
static void vma_delete(struct mm_struct *mm)
2277
{
2278
    struct vm_area_struct *vma;
2279

    
2280
    while ((vma = vma_first(mm)) != NULL) {
2281
        QTAILQ_REMOVE(&mm->mm_mmap, vma, vma_link);
2282
        g_free(vma);
2283
    }
2284
    g_free(mm);
2285
}
2286

    
2287
static int vma_add_mapping(struct mm_struct *mm, abi_ulong start,
2288
                           abi_ulong end, abi_ulong flags)
2289
{
2290
    struct vm_area_struct *vma;
2291

    
2292
    if ((vma = g_malloc0(sizeof (*vma))) == NULL)
2293
        return (-1);
2294

    
2295
    vma->vma_start = start;
2296
    vma->vma_end = end;
2297
    vma->vma_flags = flags;
2298

    
2299
    QTAILQ_INSERT_TAIL(&mm->mm_mmap, vma, vma_link);
2300
    mm->mm_count++;
2301

    
2302
    return (0);
2303
}
2304

    
2305
static struct vm_area_struct *vma_first(const struct mm_struct *mm)
2306
{
2307
    return (QTAILQ_FIRST(&mm->mm_mmap));
2308
}
2309

    
2310
static struct vm_area_struct *vma_next(struct vm_area_struct *vma)
2311
{
2312
    return (QTAILQ_NEXT(vma, vma_link));
2313
}
2314

    
2315
static int vma_get_mapping_count(const struct mm_struct *mm)
2316
{
2317
    return (mm->mm_count);
2318
}
2319

    
2320
/*
2321
 * Calculate file (dump) size of given memory region.
2322
 */
2323
static abi_ulong vma_dump_size(const struct vm_area_struct *vma)
2324
{
2325
    /* if we cannot even read the first page, skip it */
2326
    if (!access_ok(VERIFY_READ, vma->vma_start, TARGET_PAGE_SIZE))
2327
        return (0);
2328

    
2329
    /*
2330
     * Usually we don't dump executable pages as they contain
2331
     * non-writable code that the debugger can read directly from
2332
     * the target library etc.  However, thread stacks are also
2333
     * marked executable, so we read in the first page of the given
2334
     * region and check whether it contains an ELF header.  If there
2335
     * is no ELF header, we dump the region.
2336
     */
2337
    if (vma->vma_flags & PROT_EXEC) {
2338
        char page[TARGET_PAGE_SIZE];
2339

    
2340
        copy_from_user(page, vma->vma_start, sizeof (page));
2341
        if ((page[EI_MAG0] == ELFMAG0) &&
2342
            (page[EI_MAG1] == ELFMAG1) &&
2343
            (page[EI_MAG2] == ELFMAG2) &&
2344
            (page[EI_MAG3] == ELFMAG3)) {
2345
            /*
2346
             * The mapping possibly comes from an ELF binary.  Don't dump
2347
             * it.
2348
             */
2349
            return (0);
2350
        }
2351
    }
2352

    
2353
    return (vma->vma_end - vma->vma_start);
2354
}
2355

    
2356
static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
2357
                      unsigned long flags)
2358
{
2359
    struct mm_struct *mm = (struct mm_struct *)priv;
2360

    
2361
    vma_add_mapping(mm, start, end, flags);
2362
    return (0);
2363
}
2364

    
2365
static void fill_note(struct memelfnote *note, const char *name, int type,
2366
                      unsigned int sz, void *data)
2367
{
2368
    unsigned int namesz;
2369

    
2370
    namesz = strlen(name) + 1;
2371
    note->name = name;
2372
    note->namesz = namesz;
2373
    note->namesz_rounded = roundup(namesz, sizeof (int32_t));
2374
    note->type = type;
2375
    note->datasz = sz;
2376
    note->datasz_rounded = roundup(sz, sizeof (int32_t));
2377

    
2378
    note->data = data;
2379

    
2380
    /*
2381
     * We calculate the rounded-up note size here, as specified by
2382
     * the ELF specification.
2383
     */
2384
    note->notesz = sizeof (struct elf_note) +
2385
        note->namesz_rounded + note->datasz_rounded;
2386
}
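
/* Illustrative sizing (hypothetical numbers): an ELF note header
 * (struct elf_note) is three 32-bit words, i.e. 12 bytes.  For a note
 * named "CORE" (namesz 5, rounded up to 8) carrying 20 bytes of data
 * (already a multiple of 4), notesz = 12 + 8 + 20 = 40 bytes.
 */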
2387

    
2388
static void fill_elf_header(struct elfhdr *elf, int segs, uint16_t machine,
2389
                            uint32_t flags)
2390
{
2391
    (void) memset(elf, 0, sizeof(*elf));
2392

    
2393
    (void) memcpy(elf->e_ident, ELFMAG, SELFMAG);
2394
    elf->e_ident[EI_CLASS] = ELF_CLASS;
2395
    elf->e_ident[EI_DATA] = ELF_DATA;
2396
    elf->e_ident[EI_VERSION] = EV_CURRENT;
2397
    elf->e_ident[EI_OSABI] = ELF_OSABI;
2398

    
2399
    elf->e_type = ET_CORE;
2400
    elf->e_machine = machine;
2401
    elf->e_version = EV_CURRENT;
2402
    elf->e_phoff = sizeof(struct elfhdr);
2403
    elf->e_flags = flags;
2404
    elf->e_ehsize = sizeof(struct elfhdr);
2405
    elf->e_phentsize = sizeof(struct elf_phdr);
2406
    elf->e_phnum = segs;
2407

    
2408
    bswap_ehdr(elf);
2409
}
2410

    
2411
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, off_t offset)
2412
{
2413
    phdr->p_type = PT_NOTE;
2414
    phdr->p_offset = offset;
2415
    phdr->p_vaddr = 0;
2416
    phdr->p_paddr = 0;
2417
    phdr->p_filesz = sz;
2418
    phdr->p_memsz = 0;
2419
    phdr->p_flags = 0;
2420
    phdr->p_align = 0;
2421

    
2422
    bswap_phdr(phdr, 1);
2423
}
2424

    
2425
static size_t note_size(const struct memelfnote *note)
2426
{
2427
    return (note->notesz);
2428
}
2429

    
2430
static void fill_prstatus(struct target_elf_prstatus *prstatus,
2431
                          const TaskState *ts, int signr)
2432
{
2433
    (void) memset(prstatus, 0, sizeof (*prstatus));
2434
    prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
2435
    prstatus->pr_pid = ts->ts_tid;
2436
    prstatus->pr_ppid = getppid();
2437
    prstatus->pr_pgrp = getpgrp();
2438
    prstatus->pr_sid = getsid(0);
2439

    
2440
    bswap_prstatus(prstatus);
2441
}
2442

    
2443
static int fill_psinfo(struct target_elf_prpsinfo *psinfo, const TaskState *ts)
2444
{
2445
    char *base_filename;
2446
    unsigned int i, len;
2447

    
2448
    (void) memset(psinfo, 0, sizeof (*psinfo));
2449

    
2450
    len = ts->info->arg_end - ts->info->arg_start;
2451
    if (len >= ELF_PRARGSZ)
2452
        len = ELF_PRARGSZ - 1;
2453
    if (copy_from_user(&psinfo->pr_psargs, ts->info->arg_start, len))
2454
        return -EFAULT;
2455
    for (i = 0; i < len; i++)
2456
        if (psinfo->pr_psargs[i] == 0)
2457
            psinfo->pr_psargs[i] = ' ';
2458
    psinfo->pr_psargs[len] = 0;
2459

    
2460
    psinfo->pr_pid = getpid();
2461
    psinfo->pr_ppid = getppid();
2462
    psinfo->pr_pgrp = getpgrp();
2463
    psinfo->pr_sid = getsid(0);
2464
    psinfo->pr_uid = getuid();
2465
    psinfo->pr_gid = getgid();
2466

    
2467
    base_filename = g_path_get_basename(ts->bprm->filename);
2468
    /*
2469
     * Using strncpy here is fine: at max-length,
2470
     * this field is not NUL-terminated.
2471
     */
2472
    (void) strncpy(psinfo->pr_fname, base_filename,
2473
                   sizeof(psinfo->pr_fname));
2474

    
2475
    g_free(base_filename);
2476
    bswap_psinfo(psinfo);
2477
    return (0);
2478
}
2479

    
2480
static void fill_auxv_note(struct memelfnote *note, const TaskState *ts)
2481
{
2482
    elf_addr_t auxv = (elf_addr_t)ts->info->saved_auxv;
2483
    elf_addr_t orig_auxv = auxv;
2484
    void *ptr;
2485
    int len = ts->info->auxv_len;
2486

    
2487
    /*
2488
     * The auxiliary vector is stored in the target process stack.  It
2489
     * contains {type, value} pairs that we need to dump into the note.
2490
     * This is not strictly necessary but we do it here for completeness.
2491
     */
2492

    
2493
    /* read in the whole auxv vector and copy it to the memelfnote */
2494
    ptr = lock_user(VERIFY_READ, orig_auxv, len, 0);
2495
    if (ptr != NULL) {
2496
        fill_note(note, "CORE", NT_AUXV, len, ptr);
2497
        unlock_user(ptr, auxv, len);
2498
    }
2499
}
2500

    
2501
/*
2502
 * Constructs the name of the coredump file.  We use the following
2503
 * convention for the name:
2504
 *     qemu_<basename-of-target-binary>_<date>-<time>_<pid>.core
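 *     (for example, illustratively: qemu_ls_20120912-174510_2424.core)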
2505
 *
2506
 * Returns 0 in case of success, -1 otherwise (errno is set).
2507
 */
2508
static int core_dump_filename(const TaskState *ts, char *buf,
2509
                              size_t bufsize)
2510
{
2511
    char timestamp[64];
2512
    char *filename = NULL;
2513
    char *base_filename = NULL;
2514
    struct timeval tv;
2515
    struct tm tm;
2516

    
2517
    assert(bufsize >= PATH_MAX);
2518

    
2519
    if (gettimeofday(&tv, NULL) < 0) {
2520
        (void) fprintf(stderr, "unable to get current timestamp: %s",
2521
                       strerror(errno));
2522
        return (-1);
2523
    }
2524

    
2525
    filename = strdup(ts->bprm->filename);
2526
    base_filename = strdup(basename(filename));
2527
    (void) strftime(timestamp, sizeof (timestamp), "%Y%m%d-%H%M%S",
2528
                    localtime_r(&tv.tv_sec, &tm));
2529
    (void) snprintf(buf, bufsize, "qemu_%s_%s_%d.core",
2530
                    base_filename, timestamp, (int)getpid());
2531
    free(base_filename);
2532
    free(filename);
2533

    
2534
    return (0);
2535
}
2536

    
2537
static int dump_write(int fd, const void *ptr, size_t size)
2538
{
2539
    const char *bufp = (const char *)ptr;
2540
    ssize_t bytes_written, bytes_left;
2541
    struct rlimit dumpsize;
2542
    off_t pos;
2543

    
2544
    bytes_written = 0;
2545
    getrlimit(RLIMIT_CORE, &dumpsize);
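    /* Honour RLIMIT_CORE: for seekable output, give up once the current
     * offset has reached the limit and otherwise clamp this write to the
     * space that remains; non-seekable streams (ESPIPE) are written in
     * full.
     */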
2546
    if ((pos = lseek(fd, 0, SEEK_CUR)) == -1) {
2547
        if (errno == ESPIPE) { /* not a seekable stream */
2548
            bytes_left = size;
2549
        } else {
2550
            return pos;
2551
        }
2552
    } else {
2553
        if (dumpsize.rlim_cur <= pos) {
2554
            return -1;
2555
        } else if (dumpsize.rlim_cur == RLIM_INFINITY) {
2556
            bytes_left = size;
2557
        } else {
2558
            size_t limit_left = dumpsize.rlim_cur - pos;
2559
            bytes_left = limit_left >= size ? size : limit_left;
2560
        }
2561
    }
2562

    
2563
    /*
2564
     * In normal conditions a single write(2) should do, but this
2565
     * mechanism is more portable in case of sockets etc.
2566
     */
2567
    do {
2568
        bytes_written = write(fd, bufp, bytes_left);
2569
        if (bytes_written < 0) {
2570
            if (errno == EINTR)
2571
                continue;
2572
            return (-1);
2573
        } else if (bytes_written == 0) { /* eof */
2574
            return (-1);
2575
        }
2576
        bufp += bytes_written;
2577
        bytes_left -= bytes_written;
2578
    } while (bytes_left > 0);
2579

    
2580
    return (0);
2581
}
2582

    
2583
static int write_note(struct memelfnote *men, int fd)
2584
{
2585
    struct elf_note en;
2586

    
2587
    en.n_namesz = men->namesz;
2588
    en.n_type = men->type;
2589
    en.n_descsz = men->datasz;
2590

    
2591
    bswap_note(&en);
2592

    
2593
    if (dump_write(fd, &en, sizeof(en)) != 0)
2594
        return (-1);
2595
    if (dump_write(fd, men->name, men->namesz_rounded) != 0)
2596
        return (-1);
2597
    if (dump_write(fd, men->data, men->datasz_rounded) != 0)
2598
        return (-1);
2599

    
2600
    return (0);
2601
}
2602

    
2603
static void fill_thread_info(struct elf_note_info *info, const CPUArchState *env)
2604
{
2605
    TaskState *ts = (TaskState *)env->opaque;
2606
    struct elf_thread_status *ets;
2607

    
2608
    ets = g_malloc0(sizeof (*ets));
2609
    ets->num_notes = 1; /* only prstatus is dumped */
2610
    fill_prstatus(&ets->prstatus, ts, 0);
2611
    elf_core_copy_regs(&ets->prstatus.pr_reg, env);
2612
    fill_note(&ets->notes[0], "CORE", NT_PRSTATUS, sizeof (ets->prstatus),
2613
              &ets->prstatus);
2614

    
2615
    QTAILQ_INSERT_TAIL(&info->thread_list, ets, ets_link);
2616

    
2617
    info->notes_size += note_size(&ets->notes[0]);
2618
}
2619

    
2620
static int fill_note_info(struct elf_note_info *info,
2621
                          long signr, const CPUArchState *env)
2622
{
2623
#define NUMNOTES 3
2624
    CPUArchState *cpu = NULL;
2625
    TaskState *ts = (TaskState *)env->opaque;
2626
    int i;
2627

    
2628
    (void) memset(info, 0, sizeof (*info));
2629

    
2630
    QTAILQ_INIT(&info->thread_list);
2631

    
2632
    info->notes = g_malloc0(NUMNOTES * sizeof (struct memelfnote));
2633
    if (info->notes == NULL)
2634
        return (-ENOMEM);
2635
    info->prstatus = g_malloc0(sizeof (*info->prstatus));
2636
    if (info->prstatus == NULL)
2637
        return (-ENOMEM);
2638
    info->psinfo = g_malloc0(sizeof (*info->psinfo));
2639
    if (info->psinfo == NULL)
2640
        return (-ENOMEM);
2641

    
2642
    /*
2643
     * First fill in status (and registers) of current thread
2644
     * including process info & aux vector.
2645
     */
2646
    fill_prstatus(info->prstatus, ts, signr);
2647
    elf_core_copy_regs(&info->prstatus->pr_reg, env);
2648
    fill_note(&info->notes[0], "CORE", NT_PRSTATUS,
2649
              sizeof (*info->prstatus), info->prstatus);
2650
    fill_psinfo(info->psinfo, ts);
2651
    fill_note(&info->notes[1], "CORE", NT_PRPSINFO,
2652
              sizeof (*info->psinfo), info->psinfo);
2653
    fill_auxv_note(&info->notes[2], ts);
2654
    info->numnote = 3;
2655

    
2656
    info->notes_size = 0;
2657
    for (i = 0; i < info->numnote; i++)
2658
        info->notes_size += note_size(&info->notes[i]);
2659

    
2660
    /* read and fill status of all threads */
2661
    cpu_list_lock();
2662
    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
2663
        if (cpu == thread_env)
2664
            continue;
2665
        fill_thread_info(info, cpu);
2666
    }
2667
    cpu_list_unlock();
2668

    
2669
    return (0);
2670
}
2671

    
2672
static void free_note_info(struct elf_note_info *info)
2673
{
2674
    struct elf_thread_status *ets;
2675

    
2676
    while (!QTAILQ_EMPTY(&info->thread_list)) {
2677
        ets = QTAILQ_FIRST(&info->thread_list);
2678
        QTAILQ_REMOVE(&info->thread_list, ets, ets_link);
2679
        g_free(ets);
2680
    }
2681

    
2682
    g_free(info->prstatus);
2683
    g_free(info->psinfo);
2684
    g_free(info->notes);
2685
}
2686

    
2687
static int write_note_info(struct elf_note_info *info, int fd)
2688
{
2689
    struct elf_thread_status *ets;
2690
    int i, error = 0;
2691

    
2692
    /* write prstatus, psinfo and auxv for current thread */
2693
    for (i = 0; i < info->numnote; i++)
2694
        if ((error = write_note(&info->notes[i], fd)) != 0)
2695
            return (error);
2696

    
2697
    /* write prstatus for each thread */
2698
    for (ets = info->thread_list.tqh_first; ets != NULL;
2699
         ets = ets->ets_link.tqe_next) {
2700
        if ((error = write_note(&ets->notes[0], fd)) != 0)
2701
            return (error);
2702
    }
2703

    
2704
    return (0);
2705
}
2706

    
2707
/*
2708
 * Write out ELF coredump.
2709
 *
2710
 * See documentation of ELF object file format in:
2711
 * http://www.caldera.com/developers/devspecs/gabi41.pdf
2712
 *
2713
 * The coredump format in Linux is as follows:
2714
 *
2715
 * 0   +----------------------+         \
2716
 *     | ELF header           | ET_CORE  |
2717
 *     +----------------------+          |
2718
 *     | ELF program headers  |          |--- headers
2719
 *     | - NOTE section       |          |
2720
 *     | - PT_LOAD sections   |          |
2721
 *     +----------------------+         /
2722
 *     | NOTEs:               |
2723
 *     | - NT_PRSTATUS        |
2724
 *     | - NT_PRPSINFO        |
2725
 *     | - NT_AUXV            |
2726
 *     +----------------------+ <-- aligned to target page
2727
 *     | Process memory dump  |
2728
 *     :                      :
2729
 *     .                      .
2730
 *     :                      :
2731
 *     |                      |
2732
 *     +----------------------+
2733
 *
2734
 * NT_PRSTATUS -> struct elf_prstatus (per thread)
2735
 * NT_PRPSINFO -> struct elf_prpsinfo
2736
 * NT_AUXV is an array of { type, value } pairs (see fill_auxv_note()).
2737
 *
2738
 * The format follows the System V format as closely as possible.  Current
2739
 * version limitations are as follows:
2740
 *     - no floating point registers are dumped
2741
 *
2742
 * Function returns 0 in case of success, negative errno otherwise.
2743
 *
2744
 * TODO: make this also work at runtime: it should be
2745
 * possible to force a coredump from a running process and then
2746
 * continue processing.  For example, qemu could set up a SIGUSR2
2747
 * handler (provided that the target process hasn't registered a
2748
 * handler for it) that does the dump when the signal is received.
2749
 */
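
/* Illustrative layout arithmetic (hypothetical numbers, assuming an
 * ELFCLASS32 target where sizeof(struct elfhdr) == 52 and
 * sizeof(struct elf_phdr) == 32, with ELF_EXEC_PAGESIZE == 4096): with
 * segs == 10 memory mappings the headers end at 52 + (10 + 1) * 32 = 404,
 * the notes then occupy, say, 600 bytes ending at offset 1004, and the
 * process memory dump starts at data_offset = roundup(1004, 4096) = 4096.
 */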
2750
static int elf_core_dump(int signr, const CPUArchState *env)
2751
{
2752
    const TaskState *ts = (const TaskState *)env->opaque;
2753
    struct vm_area_struct *vma = NULL;
2754
    char corefile[PATH_MAX];
2755
    struct elf_note_info info;
2756
    struct elfhdr elf;
2757
    struct elf_phdr phdr;
2758
    struct rlimit dumpsize;
2759
    struct mm_struct *mm = NULL;
2760
    off_t offset = 0, data_offset = 0;
2761
    int segs = 0;
2762
    int fd = -1;
2763

    
2764
    errno = 0;
2765
    getrlimit(RLIMIT_CORE, &dumpsize);
2766
    if (dumpsize.rlim_cur == 0)
2767
        return 0;
2768

    
2769
    if (core_dump_filename(ts, corefile, sizeof (corefile)) < 0)
2770
        return (-errno);
2771

    
2772
    if ((fd = open(corefile, O_WRONLY | O_CREAT,
2773
                   S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)) < 0)
2774
        return (-errno);
2775

    
2776
    /*
2777
     * Walk through the target process memory mappings and
2778
     * set up a structure containing this information.  After
2779
     * this point the vma_xxx functions can be used.
2780
     */
2781
    if ((mm = vma_init()) == NULL)
2782
        goto out;
2783

    
2784
    walk_memory_regions(mm, vma_walker);
2785
    segs = vma_get_mapping_count(mm);
2786

    
2787
    /*
2788
     * Construct a valid coredump ELF header.  We also
2789
     * add one more segment for notes.
2790
     */
2791
    fill_elf_header(&elf, segs + 1, ELF_MACHINE, 0);
2792
    if (dump_write(fd, &elf, sizeof (elf)) != 0)
2793
        goto out;
2794

    
2795
    /* fill in in-memory version of notes */
2796
    if (fill_note_info(&info, signr, env) < 0)
2797
        goto out;
2798

    
2799
    offset += sizeof (elf);                             /* elf header */
2800
    offset += (segs + 1) * sizeof (struct elf_phdr);    /* program headers */
2801

    
2802
    /* write out notes program header */
2803
    fill_elf_note_phdr(&phdr, info.notes_size, offset);
2804

    
2805
    offset += info.notes_size;
2806
    if (dump_write(fd, &phdr, sizeof (phdr)) != 0)
2807
        goto out;
2808

    
2809
    /*
2810
     * The ELF specification wants data to start at a page boundary, so
2811
     * we align it here.
2812
     */
2813
    data_offset = offset = roundup(offset, ELF_EXEC_PAGESIZE);
2814

    
2815
    /*
2816
     * Write program headers for memory regions mapped in
2817
     * the target process.
2818
     */
2819
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2820
        (void) memset(&phdr, 0, sizeof (phdr));
2821

    
2822
        phdr.p_type = PT_LOAD;
2823
        phdr.p_offset = offset;
2824
        phdr.p_vaddr = vma->vma_start;
2825
        phdr.p_paddr = 0;
2826
        phdr.p_filesz = vma_dump_size(vma);
2827
        offset += phdr.p_filesz;
2828
        phdr.p_memsz = vma->vma_end - vma->vma_start;
2829
        phdr.p_flags = vma->vma_flags & PROT_READ ? PF_R : 0;
2830
        if (vma->vma_flags & PROT_WRITE)
2831
            phdr.p_flags |= PF_W;
2832
        if (vma->vma_flags & PROT_EXEC)
2833
            phdr.p_flags |= PF_X;
2834
        phdr.p_align = ELF_EXEC_PAGESIZE;
2835

    
2836
        bswap_phdr(&phdr, 1);
2837
        dump_write(fd, &phdr, sizeof (phdr));
2838
    }
2839

    
2840
    /*
2841
     * Next we write notes just after program headers.  No
2842
     * alignment needed here.
2843
     */
2844
    if (write_note_info(&info, fd) < 0)
2845
        goto out;
2846

    
2847
    /* align data to page boundary */
2848
    if (lseek(fd, data_offset, SEEK_SET) != data_offset)
2849
        goto out;
2850

    
2851
    /*
2852
     * Finally we can dump the process memory into the corefile as well.
2853
     */
2854
    for (vma = vma_first(mm); vma != NULL; vma = vma_next(vma)) {
2855
        abi_ulong addr;
2856
        abi_ulong end;
2857

    
2858
        end = vma->vma_start + vma_dump_size(vma);
2859

    
2860
        for (addr = vma->vma_start; addr < end;
2861
             addr += TARGET_PAGE_SIZE) {
2862
            char page[TARGET_PAGE_SIZE];
2863
            int error;
2864

    
2865
            /*
2866
             *  Read in a page from the target process memory and
2867
             *  write it to the coredump file.
2868
             */
2869
            error = copy_from_user(page, addr, sizeof (page));
2870
            if (error != 0) {
2871
                (void) fprintf(stderr, "unable to dump " TARGET_ABI_FMT_lx "\n",
2872
                               addr);
2873
                errno = -error;
2874
                goto out;
2875
            }
2876
            if (dump_write(fd, page, TARGET_PAGE_SIZE) < 0)
2877
                goto out;
2878
        }
2879
    }
2880

    
2881
 out:
2882
    free_note_info(&info);
2883
    if (mm != NULL)
2884
        vma_delete(mm);
2885
    (void) close(fd);
2886

    
2887
    if (errno != 0)
2888
        return (-errno);
2889
    return (0);
2890
}
2891
#endif /* USE_ELF_CORE_DUMP */
2892

    
2893
void do_init_thread(struct target_pt_regs *regs, struct image_info *infop)
2894
{
2895
    init_thread(regs, infop);
2896
}