root / xen-all.c @ 4d5b97da

/*
 * Copyright (C) 2010       Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include <sys/mman.h>

#include "hw/pci.h"
#include "hw/pc.h"
#include "hw/xen_common.h"
#include "hw/xen_backend.h"
#include "qmp-commands.h"

#include "range.h"
#include "xen-mapcache.h"
#include "trace.h"
#include "exec-memory.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>
#include <xen/hvm/e820.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static MemoryRegion ram_memory, ram_640k, ram_lo, ram_hi;
static MemoryRegion *framebuffer;
static bool xen_in_migration;

/* Compatibility with older versions of Xen */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
#  define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
#  define FMT_ioreq_size "u"
#endif
#ifndef HVM_PARAM_BUFIOREQ_EVTCHN
#define HVM_PARAM_BUFIOREQ_EVTCHN 26
#endif

/* Delay, in ms, before the buffered-io timer re-runs handle_buffered_io() */
#define BUFFER_IO_MAX_DELAY  100

typedef struct XenPhysmap {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    char *name;
    target_phys_addr_t phys_offset;

    QLIST_ENTRY(XenPhysmap) list;
} XenPhysmap;

typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* evtchn local port for buffered io */
    evtchn_port_t bufioreq_local_port;
    /* the evtchn fd for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;
    MemoryListener memory_listener;
    QLIST_HEAD(, XenPhysmap) physmap;
    target_phys_addr_t free_phys_offset;
    const XenPhysmap *log_for_dirtybit;

    Notifier exit;
    Notifier suspend;
} XenIOState;
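
/*
 * Overview of the ioreq flow (comment added for clarity, summarizing the
 * code below): each vcpu has an ioreq slot in the shared page and its own
 * event channel.  Xen posts an I/O request and kicks the channel;
 * cpu_handle_ioreq() dispatches it via handle_ioreq(), and the response is
 * signalled back by setting STATE_IORESP_READY and notifying the same
 * port.  Buffered (posted) I/O goes through a separate ring on the
 * buffered_io_page and a dedicated event channel, drained by
 * handle_buffered_iopage().
 */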

/* Xen-specific functions for the PIIX PCI bridge */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}
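
/*
 * Worked example (comment added for clarity): for a device at slot 3,
 * function 0 (devfn 0x18), INTA is irq_num 0, so the pirq index is
 * 0 + (3 << 2) = 12.  xen_piix3_set_irq() below decodes that back into
 * device 3 (12 >> 2) and intx pin 0 (12 & 3) for
 * xc_hvm_set_pci_intx_level().
 */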

void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}
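
/*
 * Note (comment added for clarity): config registers 0x60-0x63 are the
 * PIIX PIRQA-PIRQD route control registers.  Bit 7 set means "routing
 * disabled", which the loop above translates to ISA IRQ 0; otherwise the
 * low four bits select the ISA IRQ the link is routed to.
 */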

void xen_hvm_inject_msi(uint64_t addr, uint32_t data)
{
    xen_xc_hvm_inject_msi(xen_xc, xen_domid, addr, data);
}

static void xen_suspend_notifier(Notifier *notifier, void *data)
{
    xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

static void xen_ram_init(ram_addr_t ram_size)
{
    MemoryRegion *sysmem = get_system_memory();
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;
    ram_addr_t block_len;

    block_len = ram_size;
    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        /* Xen does not allocate the memory contiguously; it keeps a hole
         * of size HVM_BELOW_4G_MMIO_LENGTH at HVM_BELOW_4G_MMIO_START.
         */
        block_len += HVM_BELOW_4G_MMIO_LENGTH;
    }
    memory_region_init_ram(&ram_memory, "xen.ram", block_len);
    vmstate_register_ram_global(&ram_memory);

    if (ram_size >= HVM_BELOW_4G_RAM_END) {
        above_4g_mem_size = ram_size - HVM_BELOW_4G_RAM_END;
        below_4g_mem_size = HVM_BELOW_4G_RAM_END;
    } else {
        below_4g_mem_size = ram_size;
    }

    memory_region_init_alias(&ram_640k, "xen.ram.640k",
                             &ram_memory, 0, 0xa0000);
    memory_region_add_subregion(sysmem, 0, &ram_640k);
    /* Skip the VGA I/O memory space; it will be registered later by the
     * emulated VGA device.
     *
     * The area between 0xc0000 and 0x100000 will be used by SeaBIOS to load
     * option ROMs, so it is registered here as RAM.
     */
    memory_region_init_alias(&ram_lo, "xen.ram.lo",
                             &ram_memory, 0xc0000, below_4g_mem_size - 0xc0000);
    memory_region_add_subregion(sysmem, 0xc0000, &ram_lo);
    if (above_4g_mem_size > 0) {
        memory_region_init_alias(&ram_hi, "xen.ram.hi",
                                 &ram_memory, 0x100000000ULL,
                                 above_4g_mem_size);
        memory_region_add_subregion(sysmem, 0x100000000ULL, &ram_hi);
    }
}
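
/*
 * Worked example (comment added for clarity; the exact value of
 * HVM_BELOW_4G_RAM_END comes from Xen's e820.h, assumed 0xf0000000 here):
 * with 6 GiB of guest RAM, below_4g_mem_size is 3.75 GiB, the remaining
 * 2.25 GiB is aliased at 0x100000000, and the backing "xen.ram" block is
 * sized ram_size + HVM_BELOW_4G_MMIO_LENGTH so the high alias sits past
 * the MMIO hole below 4 GiB.
 */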

void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size, MemoryRegion *mr)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        /* RAM already populated in Xen */
        fprintf(stderr, "%s: do not alloc "RAM_ADDR_FMT
                " bytes of ram at "RAM_ADDR_FMT" when runstate is INMIGRATE\n",
                __func__, size, ram_addr);
        return;
    }

    if (mr == &ram_memory) {
        return;
    }

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = g_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at " RAM_ADDR_FMT, ram_addr);
    }

    g_free(pfn_list);
}

static XenPhysmap *get_physmapping(XenIOState *state,
                                   target_phys_addr_t start_addr, ram_addr_t size)
{
    XenPhysmap *physmap = NULL;

    start_addr &= TARGET_PAGE_MASK;

    QLIST_FOREACH(physmap, &state->physmap, list) {
        if (range_covers_byte(physmap->start_addr, physmap->size, start_addr)) {
            return physmap;
        }
    }
    return NULL;
}

static target_phys_addr_t xen_phys_offset_to_gaddr(target_phys_addr_t start_addr,
                                                   ram_addr_t size, void *opaque)
{
    target_phys_addr_t addr = start_addr & TARGET_PAGE_MASK;
    XenIOState *xen_io_state = opaque;
    XenPhysmap *physmap = NULL;

    QLIST_FOREACH(physmap, &xen_io_state->physmap, list) {
        if (range_covers_byte(physmap->phys_offset, physmap->size, addr)) {
            return physmap->start_addr;
        }
    }

    return start_addr;
}
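
/*
 * Note (comment added for clarity): get_physmapping() looks an entry up by
 * its guest-physical start_addr, while xen_phys_offset_to_gaddr() is the
 * inverse lookup, from a RAMBlock offset (phys_offset) back to the guest
 * address; the mapcache uses the latter as its translation callback (see
 * xen_map_cache_init() in xen_hvm_init()).
 */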

#if CONFIG_XEN_CTRL_INTERFACE_VERSION >= 340
static int xen_add_to_physmap(XenIOState *state,
                              target_phys_addr_t start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              target_phys_addr_t offset_within_region)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    target_phys_addr_t pfn, start_gpfn;
    target_phys_addr_t phys_offset = memory_region_get_ram_addr(mr);
    char path[80], value[17];

    if (get_physmapping(state, start_addr, size)) {
        return 0;
    }
    if (size <= 0) {
        return -1;
    }

    /* Xen can only handle a single dirty log region for now and we want
     * the linear framebuffer to be that region.
     * Avoid tracking any region that is not videoram and avoid tracking
     * the legacy vga region. */
    if (mr == framebuffer && start_addr > 0xbffff) {
        goto go_physmap;
    }
    return -1;

go_physmap:
    DPRINTF("mapping vram to "TARGET_FMT_plx" - "TARGET_FMT_plx"\n",
            start_addr, start_addr + size);

    pfn = phys_offset >> TARGET_PAGE_BITS;
    start_gpfn = start_addr >> TARGET_PAGE_BITS;
    for (i = 0; i < size >> TARGET_PAGE_BITS; i++) {
        unsigned long idx = pfn + i;
        xen_pfn_t gpfn = start_gpfn + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            DPRINTF("add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    physmap = g_malloc(sizeof (XenPhysmap));

    physmap->start_addr = start_addr;
    physmap->size = size;
    physmap->name = (char *)mr->name;
    physmap->phys_offset = phys_offset;

    QLIST_INSERT_HEAD(&state->physmap, physmap, list);

    xc_domain_pin_memory_cacheattr(xen_xc, xen_domid,
                                   start_addr >> TARGET_PAGE_BITS,
                                   (start_addr + size) >> TARGET_PAGE_BITS,
                                   XEN_DOMCTL_MEM_CACHEATTR_WB);

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/start_addr",
            xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)start_addr);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap/%"PRIx64"/size",
            xen_domid, (uint64_t)phys_offset);
    snprintf(value, sizeof(value), "%"PRIx64, (uint64_t)size);
    if (!xs_write(state->xenstore, 0, path, value, strlen(value))) {
        return -1;
    }
    if (mr->name) {
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%"PRIx64"/name",
                xen_domid, (uint64_t)phys_offset);
        if (!xs_write(state->xenstore, 0, path, mr->name, strlen(mr->name))) {
            return -1;
        }
    }

    return 0;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   target_phys_addr_t start_addr,
                                   ram_addr_t size)
{
    unsigned long i = 0;
    int rc = 0;
    XenPhysmap *physmap = NULL;
    target_phys_addr_t phys_offset = 0;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        return -1;
    }

    phys_offset = physmap->phys_offset;
    size = physmap->size;

    DPRINTF("unmapping vram to "TARGET_FMT_plx" - "TARGET_FMT_plx
            ", from "TARGET_FMT_plx"\n",
            phys_offset, phys_offset + size, start_addr);

    size >>= TARGET_PAGE_BITS;
    start_addr >>= TARGET_PAGE_BITS;
    phys_offset >>= TARGET_PAGE_BITS;
    for (i = 0; i < size; i++) {
        unsigned long idx = start_addr + i;
        xen_pfn_t gpfn = phys_offset + i;

        rc = xc_domain_add_to_physmap(xen_xc, xen_domid, XENMAPSPACE_gmfn, idx, gpfn);
        if (rc) {
            fprintf(stderr, "add_to_physmap MFN %"PRI_xen_pfn" to PFN %"
                    PRI_xen_pfn" failed: %d\n", idx, gpfn, rc);
            return -rc;
        }
    }

    QLIST_REMOVE(physmap, list);
    if (state->log_for_dirtybit == physmap) {
        state->log_for_dirtybit = NULL;
    }
    g_free(physmap);

    return 0;
}
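
/*
 * Note (comment added for clarity): "removing" a physmap entry does not
 * free the guest frames; it issues XENMAPSPACE_gmfn remaps in the opposite
 * direction, moving the frames from start_addr back to their original
 * phys_offset, and then forgets the mapping.
 */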

#else
static int xen_add_to_physmap(XenIOState *state,
                              target_phys_addr_t start_addr,
                              ram_addr_t size,
                              MemoryRegion *mr,
                              target_phys_addr_t offset_within_region)
{
    return -ENOSYS;
}

static int xen_remove_from_physmap(XenIOState *state,
                                   target_phys_addr_t start_addr,
                                   ram_addr_t size)
{
    return -ENOSYS;
}
#endif

static void xen_set_memory(struct MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool add)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    bool log_dirty = memory_region_is_logging(section->mr);
    hvmmem_type_t mem_type;

    if (!memory_region_is_ram(section->mr)) {
        return;
    }

    if (!(section->mr != &ram_memory
          && ((log_dirty && add) || (!log_dirty && !add)))) {
        return;
    }

    trace_xen_client_set_memory(start_addr, size, log_dirty);

    start_addr &= TARGET_PAGE_MASK;
    size = TARGET_PAGE_ALIGN(size);

    if (add) {
        if (!memory_region_is_rom(section->mr)) {
            xen_add_to_physmap(state, start_addr, size,
                               section->mr, section->offset_within_region);
        } else {
            mem_type = HVMMEM_ram_ro;
            if (xc_hvm_set_mem_type(xen_xc, xen_domid, mem_type,
                                    start_addr >> TARGET_PAGE_BITS,
                                    size >> TARGET_PAGE_BITS)) {
                DPRINTF("xc_hvm_set_mem_type error, addr: "TARGET_FMT_plx"\n",
                        start_addr);
            }
        }
    } else {
        if (xen_remove_from_physmap(state, start_addr, size) < 0) {
            DPRINTF("physmapping does not exist at "TARGET_FMT_plx"\n", start_addr);
        }
    }
}
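
/*
 * Note (comment added for clarity): the guard above means only RAM regions
 * other than the main "xen.ram" block are acted on, and only when a region
 * is added with dirty logging enabled or removed with it disabled, i.e. the
 * cases where the physmap or the read-only mem-type actually has to change.
 */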

static void xen_begin(MemoryListener *listener)
{
}

static void xen_commit(MemoryListener *listener)
{
}

static void xen_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, true);
}

static void xen_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    xen_set_memory(listener, section, false);
}

static void xen_region_nop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void xen_sync_dirty_bitmap(XenIOState *state,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size)
{
    target_phys_addr_t npages = size >> TARGET_PAGE_BITS;
    const int width = sizeof(unsigned long) * 8;
    unsigned long bitmap[(npages + width - 1) / width];
    int rc, i, j;
    const XenPhysmap *physmap = NULL;

    physmap = get_physmapping(state, start_addr, size);
    if (physmap == NULL) {
        /* not handled */
        return;
    }

    if (state->log_for_dirtybit == NULL) {
        state->log_for_dirtybit = physmap;
    } else if (state->log_for_dirtybit != physmap) {
        /* Only one range for dirty bitmap can be tracked. */
        return;
    }

    rc = xc_hvm_track_dirty_vram(xen_xc, xen_domid,
                                 start_addr >> TARGET_PAGE_BITS, npages,
                                 bitmap);
    if (rc < 0) {
        if (rc != -ENODATA) {
            memory_region_set_dirty(framebuffer, 0, size);
            DPRINTF("xen: track_dirty_vram failed (0x" TARGET_FMT_plx
                    ", 0x" TARGET_FMT_plx "): %s\n",
                    start_addr, start_addr + size, strerror(-rc));
        }
        return;
    }

    for (i = 0; i < ARRAY_SIZE(bitmap); i++) {
        unsigned long map = bitmap[i];
        while (map != 0) {
            j = ffsl(map) - 1;
            map &= ~(1ul << j);
            memory_region_set_dirty(framebuffer,
                                    (i * width + j) * TARGET_PAGE_SIZE,
                                    TARGET_PAGE_SIZE);
        }
    }
}
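
/*
 * Worked example (comment added for clarity): xc_hvm_track_dirty_vram()
 * fills one bit per page.  With 64-bit longs, bit 5 of bitmap[2] stands
 * for page 2 * 64 + 5 = 133 of the tracked range, so the ffsl() loop above
 * marks offset 133 * TARGET_PAGE_SIZE of the framebuffer region dirty.
 */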

static void xen_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_stop(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    state->log_for_dirtybit = NULL;
    /* Disable dirty bit tracking */
    xc_hvm_track_dirty_vram(xen_xc, xen_domid, 0, 0, NULL);
}

static void xen_log_sync(MemoryListener *listener, MemoryRegionSection *section)
{
    XenIOState *state = container_of(listener, XenIOState, memory_listener);

    xen_sync_dirty_bitmap(state, section->offset_within_address_space,
                          section->size);
}

static void xen_log_global_start(MemoryListener *listener)
{
    if (xen_enabled()) {
        xen_in_migration = true;
    }
}

static void xen_log_global_stop(MemoryListener *listener)
{
    xen_in_migration = false;
}

static void xen_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
}

static void xen_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
}

static MemoryListener xen_memory_listener = {
    .begin = xen_begin,
    .commit = xen_commit,
    .region_add = xen_region_add,
    .region_del = xen_region_del,
    .region_nop = xen_region_nop,
    .log_start = xen_log_start,
    .log_stop = xen_log_stop,
    .log_sync = xen_log_sync,
    .log_global_start = xen_log_global_start,
    .log_global_stop = xen_log_global_stop,
    .eventfd_add = xen_eventfd_add,
    .eventfd_del = xen_eventfd_del,
    .priority = 10,
};

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
    if (enable) {
        memory_global_dirty_log_start();
    } else {
        memory_global_dirty_log_stop();
    }
}

/* VCPU Operations, MMIO, IO ring ... */

static void xen_reset_vcpu(void *opaque)
{
    CPUArchState *env = opaque;

    env->halted = 1;
}

void xen_vcpu_init(void)
{
    CPUArchState *first_cpu;

    if ((first_cpu = qemu_get_cpu(0))) {
        qemu_register_reset(xen_reset_vcpu, first_cpu);
        xen_reset_vcpu(first_cpu);
    }
    /* if rtc_clock is left to default (host_clock), disable it */
    if (rtc_clock == host_clock) {
        qemu_clock_enable(rtc_clock, false);
    }
}

/* get the ioreq packet from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

/* Poll the evtchn fd to learn which port fired, then fetch the pending
 * ioreq from shared memory; returns NULL if there is nothing to do
 * (buffered I/O notification, read error, or nothing pending). */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port == state->bufioreq_local_port) {
        qemu_mod_timer(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
        return NULL;
    }

    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}
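
/*
 * Note (comment added for clarity): buffered-I/O notifications are not
 * handled inline; cpu_get_ioreq() only re-arms buffered_io_timer, and
 * handle_buffered_io() drains the ring on the timer path, re-arming itself
 * while work remains and unmasking bufioreq_local_port once the ring is
 * empty.
 */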

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void cpu_ioreq_pio(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                cpu_physical_memory_write(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                cpu_physical_memory_read(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t *) &tmp, req->size);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

static void cpu_ioreq_move(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t *) &req->data, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_write(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t *) &req->data, req->size);
            }
        }
    } else {
        uint64_t tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t *) &tmp, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(
                        req->data + (sign * i * (int64_t)req->size),
                        (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(
                        req->addr + (sign * i * (int64_t)req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    }
}

static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            xen_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}
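
/*
 * Worked example (comment added for clarity): the masking in handle_ioreq()
 * trims an immediate write to its access width, e.g. a 1-byte write of
 * 0x1234 is reduced to 0x34 via the mask (1 << 8) - 1 = 0xff before it
 * reaches cpu_ioreq_pio() or cpu_ioreq_move().
 */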

static int handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return 0;
    }

    memset(&req, 0x00, sizeof(req));

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }

    return req.count;
}
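
/*
 * Note (comment added for clarity): a buffered slot carries only 32 data
 * bits, so a 64-bit ("qw") request spans two consecutive slots; the second
 * slot supplies the high word and the read pointer advances by two.  The
 * return value doubles as a "did any work" flag for the timer callback.
 */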

static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    if (handle_buffered_iopage(state)) {
        qemu_mod_timer(state->buffered_io_timer,
                BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
    } else {
        qemu_del_timer(state->buffered_io_timer);
        xc_evtchn_unmask(state->xce_handle, state->bufioreq_local_port);
    }
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain(false);
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (runstate_is_running()) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain(false);
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset(VMRESET_REPORT);
                destroy_hvm_domain(true);
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static int store_dev_info(int domid, CharDriverState *cs, const char *string)
{
    struct xs_handle *xs = NULL;
    char *path = NULL;
    char *newpath = NULL;
    char *pts = NULL;
    int ret = -1;

    /* Only continue if we're talking to a pty. */
    if (strncmp(cs->filename, "pty:", 4)) {
        return 0;
    }
    pts = cs->filename + 4;

    /* We now have everything we need to set the xenstore entry. */
    xs = xs_open(0);
    if (xs == NULL) {
        fprintf(stderr, "Could not contact XenStore\n");
        goto out;
    }

    path = xs_get_domain_path(xs, domid);
    if (path == NULL) {
        fprintf(stderr, "xs_get_domain_path() error\n");
        goto out;
    }
    newpath = realloc(path, (strlen(path) + strlen(string) +
                strlen("/tty") + 1));
    if (newpath == NULL) {
        fprintf(stderr, "realloc error\n");
        goto out;
    }
    path = newpath;

    strcat(path, string);
    strcat(path, "/tty");
    if (!xs_write(xs, XBT_NULL, path, pts, strlen(pts))) {
        fprintf(stderr, "xs_write for '%s' failed\n", string);
        goto out;
    }
    ret = 0;

out:
    free(path);
    if (xs != NULL) {
        xs_close(xs);
    }

    return ret;
}

void xenstore_store_pv_console_info(int i, CharDriverState *chr)
{
    if (i == 0) {
        store_dev_info(xen_domid, chr, "/console");
    } else {
        char buf[32];
        snprintf(buf, sizeof(buf), "/device/console/%d", i);
        store_dev_info(xen_domid, chr, buf);
    }
}

static void xenstore_record_dm_state(struct xs_handle *xs, const char *state)
{
    char path[50];

    if (xs == NULL) {
        fprintf(stderr, "xenstore connection not initialized\n");
        exit(1);
    }

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(xs, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }
}


/* Initialise Xen */

static void xen_change_state_handler(void *opaque, int running,
                                     RunState state)
{
    if (running) {
        /* record state running */
        xenstore_record_dm_state(xenstore, "running");
    }
}

static void xen_hvm_change_state_handler(void *opaque, int running,
                                         RunState rstate)
{
    XenIOState *xstate = opaque;
    if (running) {
        xen_main_loop_prepare(xstate);
    }
}

static void xen_exit_notifier(Notifier *n, void *data)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }
    qemu_add_vm_change_state_handler(xen_change_state_handler, NULL);

    return 0;
}

static void xen_read_physmap(XenIOState *state)
{
    XenPhysmap *physmap = NULL;
    unsigned int len, num, i;
    char path[80], *value = NULL;
    char **entries = NULL;

    snprintf(path, sizeof(path),
            "/local/domain/0/device-model/%d/physmap", xen_domid);
    entries = xs_directory(state->xenstore, 0, path, &num);
    if (entries == NULL) {
        return;
    }

    for (i = 0; i < num; i++) {
        physmap = g_malloc(sizeof (XenPhysmap));
        physmap->phys_offset = strtoull(entries[i], NULL, 16);
        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/start_addr",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->start_addr = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/size",
                xen_domid, entries[i]);
        value = xs_read(state->xenstore, 0, path, &len);
        if (value == NULL) {
            g_free(physmap);
            continue;
        }
        physmap->size = strtoull(value, NULL, 16);
        free(value);

        snprintf(path, sizeof(path),
                "/local/domain/0/device-model/%d/physmap/%s/name",
                xen_domid, entries[i]);
        physmap->name = xs_read(state->xenstore, 0, path, &len);

        QLIST_INSERT_HEAD(&state->physmap, physmap, list);
    }
    free(entries);
}
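
/*
 * Note (comment added for clarity): the physmap entries written to
 * xenstore by xen_add_to_physmap() are what xen_read_physmap() reads back
 * here, so a device model started against a saved or migrated domain
 * recovers which guest ranges were already remapped.
 */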

int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    unsigned long bufioreq_evtchn;
    XenIOState *state;

    state = g_malloc0(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    state->suspend.notify = xen_suspend_notifier;
    qemu_register_suspend_notifier(&state->suspend);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = g_malloc0(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    rc = xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_EVTCHN,
            &bufioreq_evtchn);
    if (rc < 0) {
        fprintf(stderr, "failed to get HVM_PARAM_BUFIOREQ_EVTCHN\n");
        return -1;
    }
    rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
            (uint32_t)bufioreq_evtchn);
    if (rc == -1) {
        fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
        return -1;
    }
    state->bufioreq_local_port = rc;

    /* Init RAM management */
    xen_map_cache_init(xen_phys_offset_to_gaddr, state);
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state);

    state->memory_listener = xen_memory_listener;
    QLIST_INIT(&state->physmap);
    memory_listener_register(&state->memory_listener, get_system_memory());
    state->log_for_dirtybit = NULL;

    /* Initialize backend core & drivers */
    if (xen_be_init() != 0) {
        fprintf(stderr, "%s: xen backend core setup failed\n", __func__);
        exit(1);
    }
    xen_be_register("console", &xen_console_ops);
    xen_be_register("vkbd", &xen_kbdmouse_ops);
    xen_be_register("qdisk", &xen_blkdev_ops);
    xen_read_physmap(state);

    return 0;
}

void destroy_hvm_domain(bool reboot)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid,
                                 reboot ? SHUTDOWN_reboot : SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "xc_domain_shutdown failed to issue %s, "
                    "sts %d, %s\n", reboot ? "reboot" : "poweroff",
                    sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d %s\n", xen_domid,
                    reboot ? "reboot" : "poweroff");
        }
        xc_interface_close(xc_handle);
    }
}

void xen_register_framebuffer(MemoryRegion *mr)
{
    framebuffer = mr;
}

void xen_shutdown_fatal_error(const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    vfprintf(stderr, fmt, ap);
    va_end(ap);
    fprintf(stderr, "Will destroy the domain.\n");
    /* destroy the domain */
    qemu_system_shutdown_request();
}

void xen_modified_memory(ram_addr_t start, ram_addr_t length)
{
    if (unlikely(xen_in_migration)) {
        int rc;
        ram_addr_t start_pfn, nb_pages;

        if (length == 0) {
            length = TARGET_PAGE_SIZE;
        }
        start_pfn = start >> TARGET_PAGE_BITS;
        nb_pages = ((start + length + TARGET_PAGE_SIZE - 1) >> TARGET_PAGE_BITS)
            - start_pfn;
        rc = xc_hvm_modified_memory(xen_xc, xen_domid, start_pfn, nb_pages);
        if (rc) {
            fprintf(stderr,
                    "%s failed for "RAM_ADDR_FMT" ("RAM_ADDR_FMT"): %i, %s\n",
                    __func__, start, nb_pages, rc, strerror(-rc));
        }
    }
}