/*
 * QEMU dump
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "elf.h"
#include "cpu.h"
#include "cpu-all.h"
#include "hwaddr.h"
#include "monitor.h"
#include "kvm.h"
#include "dump.h"
#include "sysemu.h"
#include "memory_mapping.h"
#include "error.h"
#include "qmp-commands.h"
#include "gdbstub.h"

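/*
 * The vmcore is written in the byte order recorded in ArchDumpInfo.d_endian
 * (normally the guest's byte order), which is not necessarily the host byte
 * order.  The helpers below convert a host-endian value of the given width
 * to that byte order before it is stored in an ELF structure.
 */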
static uint16_t cpu_convert_to_target16(uint16_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le16(val);
    } else {
        val = cpu_to_be16(val);
    }

    return val;
}

static uint32_t cpu_convert_to_target32(uint32_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le32(val);
    } else {
        val = cpu_to_be32(val);
    }

    return val;
}

static uint64_t cpu_convert_to_target64(uint64_t val, int endian)
{
    if (endian == ELFDATA2LSB) {
        val = cpu_to_le64(val);
    } else {
        val = cpu_to_be64(val);
    }

    return val;
}

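/*
 * State shared by the whole dump operation: the architecture information and
 * memory mappings gathered by dump_init(), the layout of the ELF file being
 * produced (number of program headers, note size, offset of the guest memory),
 * the output file descriptor, and the optional physical-address filter
 * (begin/length).
 */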
typedef struct DumpState {
    ArchDumpInfo dump_info;
    MemoryMappingList list;
    uint16_t phdr_num;
    uint32_t sh_info;
    bool have_section;
    bool resume;
    size_t note_size;
    hwaddr memory_offset;
    int fd;

    RAMBlock *block;
    ram_addr_t start;
    bool has_filter;
    int64_t begin;
    int64_t length;
    Error **errp;
} DumpState;

static int dump_cleanup(DumpState *s)
{
    int ret = 0;

    memory_mapping_list_free(&s->list);
    if (s->fd != -1) {
        close(s->fd);
    }
    if (s->resume) {
        vm_start();
    }

    return ret;
}

static void dump_error(DumpState *s, const char *reason)
{
    dump_cleanup(s);
}

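/*
 * Write callback used for every piece of the vmcore.  It is also passed to
 * cpu_write_elf*_note()/cpu_write_elf*_qemunote() as their output function,
 * so it has to keep the (buf, size, opaque) shape those helpers expect.
 * 'opaque' is the DumpState, which carries the output file descriptor.
 */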
static int fd_write_vmcore(void *buf, size_t size, void *opaque)
{
    DumpState *s = opaque;
    size_t written_size;

    written_size = qemu_write_full(s->fd, buf, size);
    if (written_size != size) {
        return -1;
    }

    return 0;
}

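/*
 * Write the ELF file header (ELFCLASS64 variant; write_elf32_header() below
 * is the ELFCLASS32 twin).  When there are too many mappings to fit into the
 * 16-bit e_phnum field, have_section is set and the real count is carried in
 * the sh_info field of a single, otherwise empty section header (PN_XNUM
 * convention, see write_elf_section()).
 */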
static int write_elf64_header(DumpState *s)
{
    Elf64_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf64_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS64;
    elf_header.e_ident[EI_DATA] = s->dump_info.d_endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target64(sizeof(Elf64_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf64_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target64(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf64_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_header(DumpState *s)
{
    Elf32_Ehdr elf_header;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&elf_header, 0, sizeof(Elf32_Ehdr));
    memcpy(&elf_header, ELFMAG, SELFMAG);
    elf_header.e_ident[EI_CLASS] = ELFCLASS32;
    elf_header.e_ident[EI_DATA] = endian;
    elf_header.e_ident[EI_VERSION] = EV_CURRENT;
    elf_header.e_type = cpu_convert_to_target16(ET_CORE, endian);
    elf_header.e_machine = cpu_convert_to_target16(s->dump_info.d_machine,
                                                   endian);
    elf_header.e_version = cpu_convert_to_target32(EV_CURRENT, endian);
    elf_header.e_ehsize = cpu_convert_to_target16(sizeof(elf_header), endian);
    elf_header.e_phoff = cpu_convert_to_target32(sizeof(Elf32_Ehdr), endian);
    elf_header.e_phentsize = cpu_convert_to_target16(sizeof(Elf32_Phdr),
                                                     endian);
    elf_header.e_phnum = cpu_convert_to_target16(s->phdr_num, endian);
    if (s->have_section) {
        uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info;

        elf_header.e_shoff = cpu_convert_to_target32(shoff, endian);
        elf_header.e_shentsize = cpu_convert_to_target16(sizeof(Elf32_Shdr),
                                                         endian);
        elf_header.e_shnum = cpu_convert_to_target16(1, endian);
    }

    ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write elf header.\n");
        return -1;
    }

    return 0;
}

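/*
 * Write one PT_LOAD program header describing a guest memory mapping.
 * An offset of -1 means the mapping is filtered out of the vmcore, so the
 * header gets p_filesz = 0 while p_memsz still records the mapping size.
 */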
static int write_elf64_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf64_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target64(offset, endian);
    phdr.p_paddr = cpu_convert_to_target64(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target64(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target64(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target64(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_load(DumpState *s, MemoryMapping *memory_mapping,
                            int phdr_index, hwaddr offset)
{
    Elf32_Phdr phdr;
    int ret;
    int endian = s->dump_info.d_endian;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_LOAD, endian);
    phdr.p_offset = cpu_convert_to_target32(offset, endian);
    phdr.p_paddr = cpu_convert_to_target32(memory_mapping->phys_addr, endian);
    if (offset == -1) {
        /* When the memory is not stored into vmcore, offset will be -1 */
        phdr.p_filesz = 0;
    } else {
        phdr.p_filesz = cpu_convert_to_target32(memory_mapping->length, endian);
    }
    phdr.p_memsz = cpu_convert_to_target32(memory_mapping->length, endian);
    phdr.p_vaddr = cpu_convert_to_target32(memory_mapping->virt_addr, endian);

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

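/*
 * Write the PT_NOTE program header.  The note data itself is written later
 * by write_elf64_notes()/write_elf32_notes(); the notes are placed directly
 * in front of the guest memory, hence the offset memory_offset - note_size.
 */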
static int write_elf64_note(DumpState *s)
{
    Elf64_Phdr phdr;
    int endian = s->dump_info.d_endian;
    hwaddr begin = s->memory_offset - s->note_size;
    int ret;

    memset(&phdr, 0, sizeof(Elf64_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target64(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target64(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

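/*
 * Write the ELF note data: one architecture-specific note per CPU, followed
 * by the QEMU-specific notes with additional CPU state.  The actual note
 * layout is provided by the per-target cpu_write_elf*_note() hooks.
 */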
static int write_elf64_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf64_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf64_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

static int write_elf32_note(DumpState *s)
{
    hwaddr begin = s->memory_offset - s->note_size;
    Elf32_Phdr phdr;
    int endian = s->dump_info.d_endian;
    int ret;

    memset(&phdr, 0, sizeof(Elf32_Phdr));
    phdr.p_type = cpu_convert_to_target32(PT_NOTE, endian);
    phdr.p_offset = cpu_convert_to_target32(begin, endian);
    phdr.p_paddr = 0;
    phdr.p_filesz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_memsz = cpu_convert_to_target32(s->note_size, endian);
    phdr.p_vaddr = 0;

    ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write program header table.\n");
        return -1;
    }

    return 0;
}

static int write_elf32_notes(DumpState *s)
{
    CPUArchState *env;
    int ret;
    int id;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        id = cpu_index(env);
        ret = cpu_write_elf32_note(fd_write_vmcore, env, id, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write elf notes.\n");
            return -1;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        ret = cpu_write_elf32_qemunote(fd_write_vmcore, env, s);
        if (ret < 0) {
            dump_error(s, "dump: failed to write CPU status.\n");
            return -1;
        }
    }

    return 0;
}

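/*
 * Write the single section header that is only needed in the PN_XNUM case:
 * everything is zero except sh_info, which holds the real number of program
 * headers that did not fit into the 16-bit e_phnum field.
 */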
static int write_elf_section(DumpState *s, int type)
{
    Elf32_Shdr shdr32;
    Elf64_Shdr shdr64;
    int endian = s->dump_info.d_endian;
    int shdr_size;
    void *shdr;
    int ret;

    if (type == 0) {
        shdr_size = sizeof(Elf32_Shdr);
        memset(&shdr32, 0, shdr_size);
        shdr32.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr32;
    } else {
        shdr_size = sizeof(Elf64_Shdr);
        memset(&shdr64, 0, shdr_size);
        shdr64.sh_info = cpu_convert_to_target32(s->sh_info, endian);
        shdr = &shdr64;
    }

    ret = fd_write_vmcore(shdr, shdr_size, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to write section header table.\n");
        return -1;
    }

    return 0;
}

static int write_data(DumpState *s, void *buf, int length)
{
    int ret;

    ret = fd_write_vmcore(buf, length, s);
    if (ret < 0) {
        dump_error(s, "dump: failed to save memory.\n");
        return -1;
    }

    return 0;
}

/* write the memory to vmcore. 1 page per I/O. */
static int write_memory(DumpState *s, RAMBlock *block, ram_addr_t start,
                        int64_t size)
{
    int64_t i;
    int ret;

    for (i = 0; i < size / TARGET_PAGE_SIZE; i++) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    if ((size % TARGET_PAGE_SIZE) != 0) {
        ret = write_data(s, block->host + start + i * TARGET_PAGE_SIZE,
                         size % TARGET_PAGE_SIZE);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/* get the memory's offset in the vmcore */
static hwaddr get_offset(hwaddr phys_addr, DumpState *s)
{
    RAMBlock *block;
    hwaddr offset = s->memory_offset;
    int64_t size_in_block, start;

    if (s->has_filter) {
        if (phys_addr < s->begin || phys_addr >= s->begin + s->length) {
            return -1;
        }
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin <= block->offset) {
                start = block->offset;
            } else {
                start = s->begin;
            }

            size_in_block = block->length - (start - block->offset);
            if (s->begin + s->length < block->offset + block->length) {
                size_in_block -= block->offset + block->length -
                                 (s->begin + s->length);
            }
        } else {
            start = block->offset;
            size_in_block = block->length;
        }

        if (phys_addr >= start && phys_addr < start + size_in_block) {
            return phys_addr - start + offset;
        }

        offset += size_in_block;
    }

    return -1;
}

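/*
 * Write one PT_LOAD header per memory mapping, stopping once the available
 * number of program header slots (phdr_num or sh_info) is exhausted.
 */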
static int write_elf_loads(DumpState *s)
{
    hwaddr offset;
    MemoryMapping *memory_mapping;
    uint32_t phdr_index = 1;
    int ret;
    uint32_t max_index;

    if (s->have_section) {
        max_index = s->sh_info;
    } else {
        max_index = s->phdr_num;
    }

    QTAILQ_FOREACH(memory_mapping, &s->list.head, next) {
        offset = get_offset(memory_mapping->phys_addr, s);
        if (s->dump_info.d_class == ELFCLASS64) {
            ret = write_elf64_load(s, memory_mapping, phdr_index++, offset);
        } else {
            ret = write_elf32_load(s, memory_mapping, phdr_index++, offset);
        }

        if (ret < 0) {
            return -1;
        }

        if (phdr_index >= max_index) {
            break;
        }
    }

    return 0;
}

/* write elf header, PT_NOTE and elf note to vmcore. */
static int dump_begin(DumpState *s)
{
    int ret;

    /*
     * the vmcore's format is:
     *   --------------
     *   |  elf header |
     *   --------------
     *   |  PT_NOTE    |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  ......     |
     *   --------------
     *   |  PT_LOAD    |
     *   --------------
     *   |  sec_hdr    |
     *   --------------
     *   |  elf note   |
     *   --------------
     *   |  memory     |
     *   --------------
     *
     * we only know where the memory is saved after we write elf note into
     * vmcore.
     */

    /* write elf header to vmcore */
    if (s->dump_info.d_class == ELFCLASS64) {
        ret = write_elf64_header(s);
    } else {
        ret = write_elf32_header(s);
    }
    if (ret < 0) {
        return -1;
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        /* write PT_NOTE to vmcore */
        if (write_elf64_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 1) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf64_notes(s) < 0) {
            return -1;
        }

    } else {
        /* write PT_NOTE to vmcore */
        if (write_elf32_note(s) < 0) {
            return -1;
        }

        /* write all PT_LOAD to vmcore */
        if (write_elf_loads(s) < 0) {
            return -1;
        }

        /* write section to vmcore */
        if (s->have_section) {
            if (write_elf_section(s, 0) < 0) {
                return -1;
            }
        }

        /* write notes to vmcore */
        if (write_elf32_notes(s) < 0) {
            return -1;
        }
    }

    return 0;
}

/* all PT_LOAD data has been written; clean up and close the vmcore */
static int dump_completed(DumpState *s)
{
    dump_cleanup(s);
    return 0;
}

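/*
 * Advance s->block/s->start to the next RAM block that should be dumped,
 * honouring the optional begin/length filter.  Returns 1 when no block is
 * left, 0 otherwise.
 */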
static int get_next_block(DumpState *s, RAMBlock *block)
{
    while (1) {
        block = QLIST_NEXT(block, next);
        if (!block) {
            /* no more block */
            return 1;
        }

        s->start = 0;
        s->block = block;
        if (s->has_filter) {
            if (block->offset >= s->begin + s->length ||
                block->offset + block->length <= s->begin) {
                /* This block is out of the range */
                continue;
            }

            if (s->begin > block->offset) {
                s->start = s->begin - block->offset;
            }
        }

        return 0;
    }
}

/* write all memory to vmcore */
static int dump_iterate(DumpState *s)
{
    RAMBlock *block;
    int64_t size;
    int ret;

    while (1) {
        block = s->block;

        size = block->length;
        if (s->has_filter) {
            size -= s->start;
            if (s->begin + s->length < block->offset + block->length) {
                size -= block->offset + block->length - (s->begin + s->length);
            }
        }
        ret = write_memory(s, block, s->start, size);
        if (ret == -1) {
            return ret;
        }

        ret = get_next_block(s, block);
        if (ret == 1) {
            dump_completed(s);
            return 0;
        }
    }
}

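/* write the whole vmcore: headers and notes first, then the guest memory */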
static int create_vmcore(DumpState *s)
{
    int ret;

    ret = dump_begin(s);
    if (ret < 0) {
        return -1;
    }

    ret = dump_iterate(s);
    if (ret < 0) {
        return -1;
    }

    return 0;
}

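/*
 * Select the RAM block the dump starts from.  Without a filter this is simply
 * the first block in ram_list; with a filter it is the first block that
 * intersects [begin, begin + length).  Returns the start offset inside that
 * block, or -1 if no RAM matches the filter.
 */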
static ram_addr_t get_start_block(DumpState *s)
{
    RAMBlock *block;

    if (!s->has_filter) {
        s->block = QLIST_FIRST(&ram_list.blocks);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset >= s->begin + s->length ||
            block->offset + block->length <= s->begin) {
            /* This block is out of the range */
            continue;
        }

        s->block = block;
        if (s->begin > block->offset) {
            s->start = s->begin - block->offset;
        } else {
            s->start = 0;
        }
        return s->start;
    }

    return -1;
}

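/*
 * Prepare a dump: stop the guest if it is running (it is resumed again by
 * dump_cleanup() or the error path), collect the architecture description and
 * per-CPU state, build the list of memory mappings (via
 * qemu_get_guest_memory_mapping() when 'paging' is set), and work out the ELF
 * layout (phdr_num/sh_info and the offset where the guest memory starts).
 */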
static int dump_init(DumpState *s, int fd, bool paging, bool has_filter,
                     int64_t begin, int64_t length, Error **errp)
{
    CPUArchState *env;
    int nr_cpus;
    int ret;

    if (runstate_is_running()) {
        vm_stop(RUN_STATE_SAVE_VM);
        s->resume = true;
    } else {
        s->resume = false;
    }

    s->errp = errp;
    s->fd = fd;
    s->has_filter = has_filter;
    s->begin = begin;
    s->length = length;
    s->start = get_start_block(s);
    if (s->start == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "begin");
        goto cleanup;
    }

    /*
     * get dump info: endian, class and architecture.
     * If the target architecture is not supported, cpu_get_dump_info() will
     * return -1.
     *
     * If we use KVM, we should synchronize the registers before we get the
     * dump info.
     */
    nr_cpus = 0;
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        cpu_synchronize_state(env);
        nr_cpus++;
    }

    ret = cpu_get_dump_info(&s->dump_info);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }

    /* cpu_get_note_size() is expected to return a negative value on failure */
    ret = cpu_get_note_size(s->dump_info.d_class,
                            s->dump_info.d_machine, nr_cpus);
    if (ret < 0) {
        error_set(errp, QERR_UNSUPPORTED);
        goto cleanup;
    }
    s->note_size = ret;

    /* get memory mapping */
    memory_mapping_list_init(&s->list);
    if (paging) {
        qemu_get_guest_memory_mapping(&s->list);
    } else {
        qemu_get_guest_simple_memory_mapping(&s->list);
    }

    if (s->has_filter) {
        memory_mapping_filter(&s->list, s->begin, s->length);
    }

    /*
     * calculate phdr_num
     *
     * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow
     */
    s->phdr_num = 1; /* PT_NOTE */
    if (s->list.num < UINT16_MAX - 2) {
        s->phdr_num += s->list.num;
        s->have_section = false;
    } else {
        s->have_section = true;
        s->phdr_num = PN_XNUM;
        s->sh_info = 1; /* PT_NOTE */

        /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */
        if (s->list.num <= UINT32_MAX - 1) {
            s->sh_info += s->list.num;
        } else {
            s->sh_info = UINT32_MAX;
        }
    }

    if (s->dump_info.d_class == ELFCLASS64) {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->sh_info +
                               sizeof(Elf64_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf64_Ehdr) +
                               sizeof(Elf64_Phdr) * s->phdr_num + s->note_size;
        }
    } else {
        if (s->have_section) {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->sh_info +
                               sizeof(Elf32_Shdr) + s->note_size;
        } else {
            s->memory_offset = sizeof(Elf32_Ehdr) +
                               sizeof(Elf32_Phdr) * s->phdr_num + s->note_size;
        }
    }

    return 0;

cleanup:
    if (s->resume) {
        vm_start();
    }

    return -1;
}

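/*
 * QMP handler for the dump-guest-memory command.  The protocol string selects
 * the output: "fd:<name>" reuses a file descriptor previously added to the
 * monitor, "file:<path>" creates a regular file.  begin/length must be given
 * together and restrict the dump to a physical address range.
 *
 * A sketch of an invocation (argument names as suggested by the error
 * messages below; the QAPI schema is authoritative):
 *
 *   { "execute": "dump-guest-memory",
 *     "arguments": { "paging": false, "protocol": "file:/tmp/vmcore" } }
 */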
void qmp_dump_guest_memory(bool paging, const char *file, bool has_begin,
                           int64_t begin, bool has_length, int64_t length,
                           Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    int ret;

    if (has_begin && !has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "length");
        return;
    }
    if (!has_begin && has_length) {
        error_set(errp, QERR_MISSING_PARAMETER, "begin");
        return;
    }

#if !defined(WIN32)
    if (strstart(file, "fd:", &p)) {
        fd = monitor_get_fd(cur_mon, p, errp);
        if (fd == -1) {
            return;
        }
    }
#endif

    if (strstart(file, "file:", &p)) {
        fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR);
        if (fd < 0) {
            error_set(errp, QERR_OPEN_FILE_FAILED, p);
            return;
        }
    }

    if (fd == -1) {
        error_set(errp, QERR_INVALID_PARAMETER, "protocol");
        return;
    }

    s = g_malloc(sizeof(DumpState));

    ret = dump_init(s, fd, paging, has_begin, begin, length, errp);
    if (ret < 0) {
        g_free(s);
        return;
    }

    if (create_vmcore(s) < 0 && !error_is_set(s->errp)) {
        error_set(errp, QERR_IO_ERROR);
    }

    g_free(s);
}