xen-all.c @ a74cdab4

/*
 * Copyright (C) 2010 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <sys/mman.h>

#include "hw/pci.h"
#include "hw/pc.h"
#include "hw/xen_common.h"
#include "hw/xen_backend.h"

#include "xen-mapcache.h"
#include "trace.h"

#include <xen/hvm/ioreq.h>
#include <xen/hvm/params.h>

//#define DEBUG_XEN

#ifdef DEBUG_XEN
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "xen: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Compatibility with older interface versions */
#if __XEN_LATEST_INTERFACE_VERSION__ < 0x0003020a
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_iodata[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_iodata[vcpu].vp_ioreq;
}
# define FMT_ioreq_size PRIx64
#else
static inline uint32_t xen_vcpu_eport(shared_iopage_t *shared_page, int i)
{
    return shared_page->vcpu_ioreq[i].vp_eport;
}
static inline ioreq_t *xen_vcpu_ioreq(shared_iopage_t *shared_page, int vcpu)
{
    return &shared_page->vcpu_ioreq[vcpu];
}
# define FMT_ioreq_size "u"
#endif
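
/*
 * Note on the compatibility shim above: before interface version
 * 3.2.10 the per-vcpu ioreq was embedded in the vcpu_iodata array,
 * whereas newer versions expose a flat vcpu_ioreq array; the differing
 * FMT_ioreq_size macros presumably reflect a matching change in the
 * width of the ioreq count/size fields.
 */
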
#define BUFFER_IO_MAX_DELAY 100

typedef struct XenIOState {
    shared_iopage_t *shared_page;
    buffered_iopage_t *buffered_io_page;
    QEMUTimer *buffered_io_timer;
    /* the evtchn port for polling the notification */
    evtchn_port_t *ioreq_local_port;
    /* the evtchn handle for polling */
    XenEvtchn xce_handle;
    /* which vcpu we are serving */
    int send_vcpu;

    struct xs_handle *xenstore;

    Notifier exit;
} XenIOState;

/* Xen-specific functions for the PIIX PCI bridge */

int xen_pci_slot_get_pirq(PCIDevice *pci_dev, int irq_num)
{
    return irq_num + ((pci_dev->devfn >> 3) << 2);
}

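/*
 * In the mapping above, devfn >> 3 is the PCI slot, so
 * pirq = slot * 4 + pin; for example, INTA (irq_num 0) of the device
 * in slot 3 maps to PIRQ 12.
 */
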
void xen_piix3_set_irq(void *opaque, int irq_num, int level)
{
    xc_hvm_set_pci_intx_level(xen_xc, xen_domid, 0, 0, irq_num >> 2,
                              irq_num & 3, level);
}

void xen_piix_pci_write_config_client(uint32_t address, uint32_t val, int len)
{
    int i;

    /* Scan for updates to PCI link routes (0x60-0x63). */
    for (i = 0; i < len; i++) {
        uint8_t v = (val >> (8 * i)) & 0xff;
        if (v & 0x80) {
            v = 0;
        }
        v &= 0xf;
        if (((address + i) >= 0x60) && ((address + i) <= 0x63)) {
            xc_hvm_set_pci_link_route(xen_xc, xen_domid, address + i - 0x60, v);
        }
    }
}

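/*
 * Registers 0x60-0x63 are the PIIX3 PIRQA-D route control registers:
 * the low nibble selects the ISA IRQ and bit 7 marks the link as
 * disabled, which is why a value with bit 7 set is forwarded to Xen
 * as route 0.
 */
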
void xen_cmos_set_s3_resume(void *opaque, int irq, int level)
{
    pc_cmos_set_s3_resume(opaque, irq, level);
    if (level) {
        xc_set_hvm_param(xen_xc, xen_domid, HVM_PARAM_ACPI_S_STATE, 3);
    }
}

/* Xen Interrupt Controller */

static void xen_set_irq(void *opaque, int irq, int level)
{
    xc_hvm_set_isa_irq_level(xen_xc, xen_domid, irq, level);
}

qemu_irq *xen_interrupt_controller_init(void)
{
    return qemu_allocate_irqs(xen_set_irq, NULL, 16);
}

/* Memory Ops */

static void xen_ram_init(ram_addr_t ram_size)
{
    RAMBlock *new_block;
    ram_addr_t below_4g_mem_size, above_4g_mem_size = 0;

    new_block = qemu_mallocz(sizeof (*new_block));
    pstrcpy(new_block->idstr, sizeof (new_block->idstr), "xen.ram");
    new_block->host = NULL;
    new_block->offset = 0;
    new_block->length = ram_size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       new_block->length >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, new_block->length >> TARGET_PAGE_BITS);

    if (ram_size >= 0xe0000000) {
        above_4g_mem_size = ram_size - 0xe0000000;
        below_4g_mem_size = 0xe0000000;
    } else {
        below_4g_mem_size = ram_size;
    }

    cpu_register_physical_memory(0, below_4g_mem_size, new_block->offset);
#if TARGET_PHYS_ADDR_BITS > 32
    if (above_4g_mem_size > 0) {
        cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size,
                                     new_block->offset + below_4g_mem_size);
    }
#endif
}

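/*
 * The 0xe0000000 split above follows the usual PC layout: RAM beyond
 * 3.5 GB is mapped above the 4 GB boundary, leaving the hole below
 * 4 GB free for PCI and other MMIO.
 */
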
void xen_ram_alloc(ram_addr_t ram_addr, ram_addr_t size)
{
    unsigned long nr_pfn;
    xen_pfn_t *pfn_list;
    int i;

    trace_xen_ram_alloc(ram_addr, size);

    nr_pfn = size >> TARGET_PAGE_BITS;
    pfn_list = qemu_malloc(sizeof (*pfn_list) * nr_pfn);

    for (i = 0; i < nr_pfn; i++) {
        pfn_list[i] = (ram_addr >> TARGET_PAGE_BITS) + i;
    }

    if (xc_domain_populate_physmap_exact(xen_xc, xen_domid, nr_pfn, 0, 0, pfn_list)) {
        hw_error("xen: failed to populate ram at %lx", ram_addr);
    }

    qemu_free(pfn_list);
}

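/*
 * Under Xen, guest RAM is not allocated from QEMU's address space;
 * xen_ram_alloc instead asks the hypervisor to populate the guest
 * physmap, one pfn per TARGET_PAGE_SIZE page of the region.
 */
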
/* VCPU Operations, MMIO, IO ring ... */

static void xen_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    env->halted = 1;
}

void xen_vcpu_init(void)
{
    CPUState *first_cpu;

    if ((first_cpu = qemu_get_cpu(0))) {
        qemu_register_reset(xen_reset_vcpu, first_cpu);
        xen_reset_vcpu(first_cpu);
    }
}

/* Get the ioreq packet from shared memory */
static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu)
{
    ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu);

    if (req->state != STATE_IOREQ_READY) {
        DPRINTF("I/O request not ready: "
                "%x, ptr: %x, port: %"PRIx64", "
                "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                req->state, req->data_is_ptr, req->addr,
                req->data, req->count, req->size);
        return NULL;
    }

    xen_rmb(); /* see IOREQ_READY /then/ read contents of ioreq */

    req->state = STATE_IOREQ_INPROCESS;
    return req;
}

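/*
 * An ioreq travels STATE_IOREQ_READY -> STATE_IOREQ_INPROCESS ->
 * STATE_IORESP_READY (the response state is set in cpu_handle_ioreq
 * below). The xen_rmb() above ensures IOREQ_READY is observed before
 * the request contents are read; cpu_handle_ioreq pairs it with a
 * xen_wmb() before publishing the response.
 */
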
/* Use poll to get the port notification; returns the pending ioreq
 * packet from shared memory, or NULL if there is none. */
static ioreq_t *cpu_get_ioreq(XenIOState *state)
{
    int i;
    evtchn_port_t port;

    port = xc_evtchn_pending(state->xce_handle);
    if (port != -1) {
        for (i = 0; i < smp_cpus; i++) {
            if (state->ioreq_local_port[i] == port) {
                break;
            }
        }

        if (i == smp_cpus) {
            hw_error("Fatal error while trying to get io event!\n");
        }

        /* unmask the wanted port again */
        xc_evtchn_unmask(state->xce_handle, port);

        /* get the io packet from shared memory */
        state->send_vcpu = i;
        return cpu_get_ioreq_from_shared_memory(state, i);
    }

    /* read error or read nothing */
    return NULL;
}

static uint32_t do_inp(pio_addr_t addr, unsigned long size)
{
    switch (size) {
        case 1:
            return cpu_inb(addr);
        case 2:
            return cpu_inw(addr);
        case 4:
            return cpu_inl(addr);
        default:
            hw_error("inp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void do_outp(pio_addr_t addr,
        unsigned long size, uint32_t val)
{
    switch (size) {
        case 1:
            return cpu_outb(addr, val);
        case 2:
            return cpu_outw(addr, val);
        case 4:
            return cpu_outl(addr, val);
        default:
            hw_error("outp: bad size: %04"FMT_pioaddr" %lx", addr, size);
    }
}

static void cpu_ioreq_pio(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (req->dir == IOREQ_READ) {
        if (!req->data_is_ptr) {
            req->data = do_inp(req->addr, req->size);
        } else {
            uint32_t tmp;

            for (i = 0; i < req->count; i++) {
                tmp = do_inp(req->addr, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    } else if (req->dir == IOREQ_WRITE) {
        if (!req->data_is_ptr) {
            do_outp(req->addr, req->size, req->data);
        } else {
            for (i = 0; i < req->count; i++) {
                uint32_t tmp = 0;

                cpu_physical_memory_read(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
                do_outp(req->addr, req->size, tmp);
            }
        }
    }
}

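/*
 * When data_is_ptr is set, req->data is a guest-physical address and
 * the request describes a rep string instruction (e.g. rep insw):
 * count transfers of size bytes each, stepping forwards, or backwards
 * when the direction flag df is set (sign == -1 above).
 */
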
static void cpu_ioreq_move(ioreq_t *req)
{
    int i, sign;

    sign = req->df ? -1 : 1;

    if (!req->data_is_ptr) {
        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                        (uint8_t *) &req->data, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                        (uint8_t *) &req->data, req->size);
            }
        }
    } else {
        target_ulong tmp;

        if (req->dir == IOREQ_READ) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->addr + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
            }
        } else if (req->dir == IOREQ_WRITE) {
            for (i = 0; i < req->count; i++) {
                cpu_physical_memory_read(req->data + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
                cpu_physical_memory_write(req->addr + (sign * i * req->size),
                        (uint8_t *) &tmp, req->size);
            }
        }
    }
}

static void handle_ioreq(ioreq_t *req)
{
    if (!req->data_is_ptr && (req->dir == IOREQ_WRITE) &&
            (req->size < sizeof (target_ulong))) {
        req->data &= ((target_ulong) 1 << (8 * req->size)) - 1;
    }

    switch (req->type) {
        case IOREQ_TYPE_PIO:
            cpu_ioreq_pio(req);
            break;
        case IOREQ_TYPE_COPY:
            cpu_ioreq_move(req);
            break;
        case IOREQ_TYPE_TIMEOFFSET:
            break;
        case IOREQ_TYPE_INVALIDATE:
            qemu_invalidate_map_cache();
            break;
        default:
            hw_error("Invalid ioreq type 0x%x\n", req->type);
    }
}

static void handle_buffered_iopage(XenIOState *state)
{
    buf_ioreq_t *buf_req = NULL;
    ioreq_t req;
    int qw;

    if (!state->buffered_io_page) {
        return;
    }

    while (state->buffered_io_page->read_pointer != state->buffered_io_page->write_pointer) {
        buf_req = &state->buffered_io_page->buf_ioreq[
            state->buffered_io_page->read_pointer % IOREQ_BUFFER_SLOT_NUM];
        req.size = 1UL << buf_req->size;
        req.count = 1;
        req.addr = buf_req->addr;
        req.data = buf_req->data;
        req.state = STATE_IOREQ_READY;
        req.dir = buf_req->dir;
        req.df = 1;
        req.type = buf_req->type;
        req.data_is_ptr = 0;
        qw = (req.size == 8);
        if (qw) {
            buf_req = &state->buffered_io_page->buf_ioreq[
                (state->buffered_io_page->read_pointer + 1) % IOREQ_BUFFER_SLOT_NUM];
            req.data |= ((uint64_t)buf_req->data) << 32;
        }

        handle_ioreq(&req);

        xen_mb();
        state->buffered_io_page->read_pointer += qw ? 2 : 1;
    }
}

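/*
 * The buffered-io page is a producer/consumer ring: Xen advances
 * write_pointer, we drain slots and advance read_pointer. Each
 * buf_ioreq_t carries only 32 bits of data, so an 8-byte access is
 * split across two consecutive slots with the second holding the high
 * half; the qw (quad word) path above reassembles it.
 */
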
static void handle_buffered_io(void *opaque)
{
    XenIOState *state = opaque;

    handle_buffered_iopage(state);
    qemu_mod_timer(state->buffered_io_timer,
                   BUFFER_IO_MAX_DELAY + qemu_get_clock_ms(rt_clock));
}

static void cpu_handle_ioreq(void *opaque)
{
    XenIOState *state = opaque;
    ioreq_t *req = cpu_get_ioreq(state);

    handle_buffered_iopage(state);
    if (req) {
        handle_ioreq(req);

        if (req->state != STATE_IOREQ_INPROCESS) {
            fprintf(stderr, "Badness in I/O request ... not in service?!: "
                    "%x, ptr: %x, port: %"PRIx64", "
                    "data: %"PRIx64", count: %" FMT_ioreq_size ", size: %" FMT_ioreq_size "\n",
                    req->state, req->data_is_ptr, req->addr,
                    req->data, req->count, req->size);
            destroy_hvm_domain();
            return;
        }

        xen_wmb(); /* Update ioreq contents /then/ update state. */

        /*
         * We do this before we send the response so that the tools
         * have the opportunity to pick up on the reset before the
         * guest resumes and does a hlt with interrupts disabled which
         * causes Xen to powerdown the domain.
         */
        if (vm_running) {
            if (qemu_shutdown_requested_get()) {
                destroy_hvm_domain();
            }
            if (qemu_reset_requested_get()) {
                qemu_system_reset();
            }
        }

        req->state = STATE_IORESP_READY;
        xc_evtchn_notify(state->xce_handle, state->ioreq_local_port[state->send_vcpu]);
    }
}

static void xenstore_record_dm_state(XenIOState *s, const char *state)
{
    char path[50];

    snprintf(path, sizeof (path), "/local/domain/0/device-model/%u/state", xen_domid);
    if (!xs_write(s->xenstore, XBT_NULL, path, state, strlen(state))) {
        fprintf(stderr, "error recording dm state\n");
        exit(1);
    }
}

static void xen_main_loop_prepare(XenIOState *state)
{
    int evtchn_fd = -1;

    if (state->xce_handle != XC_HANDLER_INITIAL_VALUE) {
        evtchn_fd = xc_evtchn_fd(state->xce_handle);
    }

    state->buffered_io_timer = qemu_new_timer_ms(rt_clock, handle_buffered_io,
                                                 state);
    qemu_mod_timer(state->buffered_io_timer, qemu_get_clock_ms(rt_clock));

    if (evtchn_fd != -1) {
        qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state);
    }

    /* record state running */
    xenstore_record_dm_state(state, "running");
}

/* Initialise Xen */

static void xen_vm_change_state_handler(void *opaque, int running, int reason)
{
    XenIOState *state = opaque;
    if (running) {
        xen_main_loop_prepare(state);
    }
}

static void xen_exit_notifier(Notifier *n)
{
    XenIOState *state = container_of(n, XenIOState, exit);

    xc_evtchn_close(state->xce_handle);
    xs_daemon_close(state->xenstore);
}

int xen_init(void)
{
    xen_xc = xen_xc_interface_open(0, 0, 0);
    if (xen_xc == XC_HANDLER_INITIAL_VALUE) {
        xen_be_printf(NULL, 0, "can't open xen interface\n");
        return -1;
    }

    return 0;
}

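/*
 * HVM setup: open the event channel interface and xenstore, map the
 * shared ioreq page and the buffered-io page located via their HVM
 * params, then bind one interdomain event channel per vcpu so the
 * hypervisor can signal pending I/O requests.
 */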
int xen_hvm_init(void)
{
    int i, rc;
    unsigned long ioreq_pfn;
    XenIOState *state;

    state = qemu_mallocz(sizeof (XenIOState));

    state->xce_handle = xen_xc_evtchn_open(NULL, 0);
    if (state->xce_handle == XC_HANDLER_INITIAL_VALUE) {
        perror("xen: event channel open");
        return -errno;
    }

    state->xenstore = xs_daemon_open();
    if (state->xenstore == NULL) {
        perror("xen: xenstore open");
        return -errno;
    }

    state->exit.notify = xen_exit_notifier;
    qemu_add_exit_notifier(&state->exit);

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_IOREQ_PFN, &ioreq_pfn);
    DPRINTF("shared page at pfn %lx\n", ioreq_pfn);
    state->shared_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                              PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->shared_page == NULL) {
        hw_error("map shared IO page returned error %d handle=" XC_INTERFACE_FMT,
                 errno, xen_xc);
    }

    xc_get_hvm_param(xen_xc, xen_domid, HVM_PARAM_BUFIOREQ_PFN, &ioreq_pfn);
    DPRINTF("buffered io page at pfn %lx\n", ioreq_pfn);
    state->buffered_io_page = xc_map_foreign_range(xen_xc, xen_domid, XC_PAGE_SIZE,
                                                   PROT_READ|PROT_WRITE, ioreq_pfn);
    if (state->buffered_io_page == NULL) {
        hw_error("map buffered IO page returned error %d", errno);
    }

    state->ioreq_local_port = qemu_mallocz(smp_cpus * sizeof (evtchn_port_t));

    /* FIXME: how about if we overflow the page here? */
    for (i = 0; i < smp_cpus; i++) {
        rc = xc_evtchn_bind_interdomain(state->xce_handle, xen_domid,
                                        xen_vcpu_eport(state->shared_page, i));
        if (rc == -1) {
            fprintf(stderr, "bind interdomain ioctl error %d\n", errno);
            return -1;
        }
        state->ioreq_local_port[i] = rc;
    }

    /* Init RAM management */
    qemu_map_cache_init();
    xen_ram_init(ram_size);

    qemu_add_vm_change_state_handler(xen_vm_change_state_handler, state);

    return 0;
}

void destroy_hvm_domain(void)
{
    XenXC xc_handle;
    int sts;

    xc_handle = xen_xc_interface_open(0, 0, 0);
    if (xc_handle == XC_HANDLER_INITIAL_VALUE) {
        fprintf(stderr, "Cannot acquire xenctrl handle\n");
    } else {
        sts = xc_domain_shutdown(xc_handle, xen_domid, SHUTDOWN_poweroff);
        if (sts != 0) {
            fprintf(stderr, "? xc_domain_shutdown failed to issue poweroff, "
                    "sts %d, %s\n", sts, strerror(errno));
        } else {
            fprintf(stderr, "Issued domain %d poweroff\n", xen_domid);
        }
        xc_interface_close(xc_handle);
    }
}