Revision ffcde12f kvm-all.c
b/kvm-all.c | ||
---|---|---|
340 | 340 |
} |
341 | 341 |
|
342 | 342 |
/* get kvm's dirty pages bitmap and update qemu's */ |
343 |
static int kvm_get_dirty_pages_log_range(unsigned long start_addr, |
|
344 |
unsigned long *bitmap, |
|
345 |
unsigned long offset, |
|
346 |
unsigned long mem_size) |
|
343 |
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section, |
|
344 |
unsigned long *bitmap) |
|
347 | 345 |
{ |
348 | 346 |
unsigned int i, j; |
349 | 347 |
unsigned long page_number, addr, addr1, c; |
350 |
ram_addr_t ram_addr; |
|
351 |
unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / |
|
352 |
HOST_LONG_BITS; |
|
348 |
unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS; |
|
353 | 349 |
|
354 | 350 |
/* |
355 | 351 |
* bitmap-traveling is faster than memory-traveling (for addr...) |
... | ... | |
363 | 359 |
c &= ~(1ul << j); |
364 | 360 |
page_number = i * HOST_LONG_BITS + j; |
365 | 361 |
addr1 = page_number * TARGET_PAGE_SIZE; |
366 |
addr = offset + addr1; |
|
367 |
ram_addr = cpu_get_physical_page_desc(addr); |
|
368 |
cpu_physical_memory_set_dirty(ram_addr); |
|
362 |
addr = section->offset_within_region + addr1; |
|
363 |
memory_region_set_dirty(section->mr, addr); |
|
369 | 364 |
} while (c != 0); |
370 | 365 |
} |
371 | 366 |
} |
... | ... | |
382 | 377 |
* @start_addr: start of logged region. |
383 | 378 |
* @end_addr: end of logged region. |
384 | 379 |
*/ |
385 |
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, |
|
386 |
target_phys_addr_t end_addr) |
|
380 |
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section) |
|
387 | 381 |
{ |
388 | 382 |
KVMState *s = kvm_state; |
389 | 383 |
unsigned long size, allocated_size = 0; |
390 | 384 |
KVMDirtyLog d; |
391 | 385 |
KVMSlot *mem; |
392 | 386 |
int ret = 0; |
387 |
target_phys_addr_t start_addr = section->offset_within_address_space; |
|
388 |
target_phys_addr_t end_addr = start_addr + section->size; |
|
393 | 389 |
|
394 | 390 |
d.dirty_bitmap = NULL; |
395 | 391 |
while (start_addr < end_addr) { |
... | ... | |
428 | 424 |
break; |
429 | 425 |
} |
430 | 426 |
|
431 |
kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap, |
|
432 |
mem->start_addr, mem->memory_size); |
|
427 |
kvm_get_dirty_pages_log_range(section, d.dirty_bitmap); |
|
433 | 428 |
start_addr = mem->start_addr + mem->memory_size; |
434 | 429 |
} |
435 | 430 |
g_free(d.dirty_bitmap); |
... | ... | |
686 | 681 |
static void kvm_log_sync(MemoryListener *listener, |
687 | 682 |
MemoryRegionSection *section) |
688 | 683 |
{ |
689 |
target_phys_addr_t start = section->offset_within_address_space; |
|
690 |
target_phys_addr_t end = start + section->size; |
|
691 | 684 |
int r; |
692 | 685 |
|
693 |
r = kvm_physical_sync_dirty_bitmap(start, end);
|
|
686 |
r = kvm_physical_sync_dirty_bitmap(section);
|
|
694 | 687 |
if (r < 0) { |
695 | 688 |
abort(); |
696 | 689 |
} |
Also available in: Unified diff