Revision f471a17e
b/arch_init.c | ||
---|---|---|
110 | 110 |
ram_addr_t addr = 0; |
111 | 111 |
int bytes_sent = 0; |
112 | 112 |
|
113 |
while (addr < last_ram_offset) {
|
|
113 |
while (addr < ram_list.last_offset) {
|
|
114 | 114 |
if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) { |
115 | 115 |
uint8_t *p; |
116 | 116 |
|
... | ... | |
133 | 133 |
break; |
134 | 134 |
} |
135 | 135 |
addr += TARGET_PAGE_SIZE; |
136 |
current_addr = (saved_addr + addr) % last_ram_offset;
|
|
136 |
current_addr = (saved_addr + addr) % ram_list.last_offset;
|
|
137 | 137 |
} |
138 | 138 |
|
139 | 139 |
return bytes_sent; |
... | ... | |
146 | 146 |
ram_addr_t addr; |
147 | 147 |
ram_addr_t count = 0; |
148 | 148 |
|
149 |
for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
|
|
149 |
for (addr = 0; addr < ram_list.last_offset; addr += TARGET_PAGE_SIZE) {
|
|
150 | 150 |
if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) { |
151 | 151 |
count++; |
152 | 152 |
} |
... | ... | |
167 | 167 |
|
168 | 168 |
uint64_t ram_bytes_total(void) |
169 | 169 |
{ |
170 |
return last_ram_offset;
|
|
170 |
return ram_list.last_offset;
|
|
171 | 171 |
} |
172 | 172 |
|
173 | 173 |
int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque) |
... | ... | |
191 | 191 |
bytes_transferred = 0; |
192 | 192 |
|
193 | 193 |
/* Make sure all dirty bits are set */ |
194 |
for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
|
|
194 |
for (addr = 0; addr < ram_list.last_offset; addr += TARGET_PAGE_SIZE) {
|
|
195 | 195 |
if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG)) { |
196 | 196 |
cpu_physical_memory_set_dirty(addr); |
197 | 197 |
} |
... | ... | |
200 | 200 |
/* Enable dirty memory tracking */ |
201 | 201 |
cpu_physical_memory_set_dirty_tracking(1); |
202 | 202 |
|
203 |
qemu_put_be64(f, last_ram_offset | RAM_SAVE_FLAG_MEM_SIZE);
|
|
203 |
qemu_put_be64(f, ram_list.last_offset | RAM_SAVE_FLAG_MEM_SIZE);
|
|
204 | 204 |
} |
205 | 205 |
|
206 | 206 |
bytes_transferred_last = bytes_transferred; |
... | ... | |
259 | 259 |
addr &= TARGET_PAGE_MASK; |
260 | 260 |
|
261 | 261 |
if (flags & RAM_SAVE_FLAG_MEM_SIZE) { |
262 |
if (addr != last_ram_offset) {
|
|
262 |
if (addr != ram_list.last_offset) {
|
|
263 | 263 |
return -EINVAL; |
264 | 264 |
} |
265 | 265 |
} |
b/cpu-all.h | ||
---|---|---|
859 | 859 |
/* memory API */ |
860 | 860 |
|
861 | 861 |
extern int phys_ram_fd; |
862 |
extern uint8_t *phys_ram_dirty; |
|
863 | 862 |
extern ram_addr_t ram_size; |
864 |
extern ram_addr_t last_ram_offset; |
|
863 |
|
|
864 |
typedef struct RAMBlock { |
|
865 |
uint8_t *host; |
|
866 |
ram_addr_t offset; |
|
867 |
ram_addr_t length; |
|
868 |
QLIST_ENTRY(RAMBlock) next; |
|
869 |
} RAMBlock; |
|
870 |
|
|
871 |
typedef struct RAMList { |
|
872 |
uint8_t *phys_dirty; |
|
873 |
ram_addr_t last_offset; |
|
874 |
QLIST_HEAD(ram, RAMBlock) blocks; |
|
875 |
} RAMList; |
|
876 |
extern RAMList ram_list; |
|
865 | 877 |
|
866 | 878 |
extern const char *mem_path; |
867 | 879 |
extern int mem_prealloc; |
... | ... | |
891 | 903 |
/* read dirty bit (return 0 or 1) */ |
892 | 904 |
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) |
893 | 905 |
{ |
894 |
return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
|
|
906 |
return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
|
|
895 | 907 |
} |
896 | 908 |
|
897 | 909 |
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr) |
898 | 910 |
{ |
899 |
return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
|
|
911 |
return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
|
|
900 | 912 |
} |
901 | 913 |
|
902 | 914 |
static inline int cpu_physical_memory_get_dirty(ram_addr_t addr, |
903 | 915 |
int dirty_flags) |
904 | 916 |
{ |
905 |
return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
|
|
917 |
return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
|
|
906 | 918 |
} |
907 | 919 |
|
908 | 920 |
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr) |
909 | 921 |
{ |
910 |
phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
|
|
922 |
ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
|
|
911 | 923 |
} |
912 | 924 |
|
913 | 925 |
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr, |
914 | 926 |
int dirty_flags) |
915 | 927 |
{ |
916 |
return phys_ram_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
|
|
928 |
return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
|
|
917 | 929 |
} |
918 | 930 |
|
919 | 931 |
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start, |
... | ... | |
925 | 937 |
|
926 | 938 |
len = length >> TARGET_PAGE_BITS; |
927 | 939 |
mask = ~dirty_flags; |
928 |
p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
|
|
940 |
p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
|
|
929 | 941 |
for (i = 0; i < len; i++) { |
930 | 942 |
p[i] &= mask; |
931 | 943 |
} |
b/exec.c | ||
---|---|---|
110 | 110 |
|
111 | 111 |
#if !defined(CONFIG_USER_ONLY) |
112 | 112 |
int phys_ram_fd; |
113 |
uint8_t *phys_ram_dirty; |
|
114 | 113 |
static int in_migration; |
115 | 114 |
|
116 |
typedef struct RAMBlock { |
|
117 |
uint8_t *host; |
|
118 |
ram_addr_t offset; |
|
119 |
ram_addr_t length; |
|
120 |
struct RAMBlock *next; |
|
121 |
} RAMBlock; |
|
122 |
|
|
123 |
static RAMBlock *ram_blocks; |
|
124 |
/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug) |
|
125 |
then we can no longer assume contiguous ram offsets, and external uses |
|
126 |
of this variable will break. */ |
|
127 |
ram_addr_t last_ram_offset; |
|
115 |
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) }; |
|
128 | 116 |
#endif |
129 | 117 |
|
130 | 118 |
CPUState *first_cpu; |
... | ... | |
2810 | 2798 |
madvise(new_block->host, size, MADV_MERGEABLE); |
2811 | 2799 |
#endif |
2812 | 2800 |
} |
2813 |
new_block->offset = last_ram_offset;
|
|
2801 |
new_block->offset = ram_list.last_offset;
|
|
2814 | 2802 |
new_block->length = size; |
2815 | 2803 |
|
2816 |
new_block->next = ram_blocks; |
|
2817 |
ram_blocks = new_block; |
|
2804 |
QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); |
|
2818 | 2805 |
|
2819 |
phys_ram_dirty = qemu_realloc(phys_ram_dirty,
|
|
2820 |
(last_ram_offset + size) >> TARGET_PAGE_BITS);
|
|
2821 |
memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
|
|
2806 |
ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
|
|
2807 |
(ram_list.last_offset + size) >> TARGET_PAGE_BITS);
|
|
2808 |
memset(ram_list.phys_dirty + (ram_list.last_offset >> TARGET_PAGE_BITS),
|
|
2822 | 2809 |
0xff, size >> TARGET_PAGE_BITS); |
2823 | 2810 |
|
2824 |
last_ram_offset += size;
|
|
2811 |
ram_list.last_offset += size;
|
|
2825 | 2812 |
|
2826 | 2813 |
if (kvm_enabled()) |
2827 | 2814 |
kvm_setup_guest_memory(new_block->host, size); |
... | ... | |
2844 | 2831 |
*/ |
2845 | 2832 |
void *qemu_get_ram_ptr(ram_addr_t addr) |
2846 | 2833 |
{ |
2847 |
RAMBlock *prev; |
|
2848 |
RAMBlock **prevp; |
|
2849 | 2834 |
RAMBlock *block; |
2850 | 2835 |
|
2851 |
prev = NULL; |
|
2852 |
prevp = &ram_blocks; |
|
2853 |
block = ram_blocks; |
|
2854 |
while (block && (block->offset > addr |
|
2855 |
|| block->offset + block->length <= addr)) { |
|
2856 |
if (prev) |
|
2857 |
prevp = &prev->next; |
|
2858 |
prev = block; |
|
2859 |
block = block->next; |
|
2860 |
} |
|
2861 |
if (!block) { |
|
2862 |
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); |
|
2863 |
abort(); |
|
2864 |
} |
|
2865 |
/* Move this entry to the start of the list. */ |
|
2866 |
if (prev) { |
|
2867 |
prev->next = block->next; |
|
2868 |
block->next = *prevp; |
|
2869 |
*prevp = block; |
|
2836 |
QLIST_FOREACH(block, &ram_list.blocks, next) { |
|
2837 |
if (addr - block->offset < block->length) { |
|
2838 |
QLIST_REMOVE(block, next); |
|
2839 |
QLIST_INSERT_HEAD(&ram_list.blocks, block, next); |
|
2840 |
return block->host + (addr - block->offset); |
|
2841 |
} |
|
2870 | 2842 |
} |
2871 |
return block->host + (addr - block->offset); |
|
2843 |
|
|
2844 |
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); |
|
2845 |
abort(); |
|
2846 |
|
|
2847 |
return NULL; |
|
2872 | 2848 |
} |
2873 | 2849 |
|
2874 | 2850 |
/* Some of the softmmu routines need to translate from a host pointer |
... | ... | |
2878 | 2854 |
RAMBlock *block; |
2879 | 2855 |
uint8_t *host = ptr; |
2880 | 2856 |
|
2881 |
block = ram_blocks; |
|
2882 |
while (block && (block->host > host |
|
2883 |
|| block->host + block->length <= host)) { |
|
2884 |
block = block->next; |
|
2885 |
} |
|
2886 |
if (!block) { |
|
2887 |
fprintf(stderr, "Bad ram pointer %p\n", ptr); |
|
2888 |
abort(); |
|
2857 |
QLIST_FOREACH(block, &ram_list.blocks, next) { |
|
2858 |
if (host - block->host < block->length) { |
|
2859 |
return block->offset + (host - block->host); |
|
2860 |
} |
|
2889 | 2861 |
} |
2890 |
return block->offset + (host - block->host); |
|
2862 |
|
|
2863 |
fprintf(stderr, "Bad ram pointer %p\n", ptr); |
|
2864 |
abort(); |
|
2865 |
|
|
2866 |
return 0; |
|
2891 | 2867 |
} |
2892 | 2868 |
|
2893 | 2869 |
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr) |
Also available in: Unified diff