root / exec-obsolete.h @ 45724d6d
History | View | Annotate | Download (4.2 kB)
/*
 * Declarations for obsolete exec.c functions
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 *
 */
|
/*
 * This header is for use by exec.c and memory.c ONLY.  Do not include it.
 * The functions declared here will be removed soon.
 */
|
18 |
|
19 |
#ifndef EXEC_OBSOLETE_H
|
20 |
#define EXEC_OBSOLETE_H
|
21 |
|
22 |
#ifndef WANT_EXEC_OBSOLETE
|
23 |
#error Do not include exec-obsolete.h
|
24 |
#endif
|
25 |
|
26 |
#ifndef CONFIG_USER_ONLY
|
27 |
|
28 |
/* Obsolete RAM block allocate/free entry points (see the warning at the top
 * of this header).  qemu_ram_alloc_from_ptr() registers an existing host
 * buffer @host as guest RAM backing @mr; qemu_ram_alloc() lets QEMU allocate
 * the backing itself. */
ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr);
ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);

struct MemoryRegion;
struct MemoryRegionSection;
/* Register a physical memory section; @readonly marks it non-writable. */
void cpu_register_physical_memory_log(struct MemoryRegionSection *section,
                                      bool readonly);

/* (Un)register a guest-physical range for coalesced MMIO handling. */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/* Globally enable/disable dirty-page tracking; returns an error code. */
int cpu_physical_memory_set_dirty_tracking(int enable);

/* Per-page dirty-flag bits, stored as one byte per page in
 * ram_list.phys_dirty[] (see the accessors below). */
#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define MIGRATION_DIRTY_FLAG 0x08
47 |
|
48 |
/* Return the raw dirty-flag byte recorded for the page containing @addr. */
static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
    ram_addr_t page = addr >> TARGET_PAGE_BITS;

    return ram_list.phys_dirty[page];
}
52 |
|
53 |
/* read dirty bit (return 0 or 1) */
|
54 |
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr) |
55 |
{ |
56 |
return cpu_physical_memory_get_dirty_flags(addr) == 0xff; |
57 |
} |
58 |
|
59 |
/* OR together the requested @dirty_flags over every page overlapping
 * [start, start + length); a nonzero result means at least one page in the
 * range has one of the requested flags set. */
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
                                                ram_addr_t length,
                                                int dirty_flags)
{
    ram_addr_t first = start & TARGET_PAGE_MASK;
    ram_addr_t limit = TARGET_PAGE_ALIGN(start + length);
    ram_addr_t page;
    int acc = 0;

    for (page = first; page < limit; page += TARGET_PAGE_SIZE) {
        acc |= dirty_flags & cpu_physical_memory_get_dirty_flags(page);
    }

    return acc;
}
73 |
|
74 |
/* Set @dirty_flags on the page containing @addr and return the updated flag
 * byte.  Keeps ram_list.dirty_pages in sync by counting the page when the
 * MIGRATION flag transitions from clear to set. */
static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                      int dirty_flags)
{
    ram_addr_t page = addr >> TARGET_PAGE_BITS;

    if (dirty_flags & MIGRATION_DIRTY_FLAG) {
        /* Only the clear -> set transition changes the page count. */
        if (!cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                           MIGRATION_DIRTY_FLAG)) {
            ram_list.dirty_pages++;
        }
    }

    return ram_list.phys_dirty[page] |= dirty_flags;
}
84 |
|
85 |
/* Mark the page containing @addr dirty for every client: sets all flag
 * bits at once (0xff). */
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    cpu_physical_memory_set_dirty_flags(addr, 0xff);
}
89 |
|
90 |
/* Clear @dirty_flags on the page containing @addr and return the updated
 * flag byte.  Keeps ram_list.dirty_pages in sync by un-counting the page
 * when the MIGRATION flag transitions from set to clear. */
static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
                                                        int dirty_flags)
{
    ram_addr_t page = addr >> TARGET_PAGE_BITS;

    if (dirty_flags & MIGRATION_DIRTY_FLAG) {
        /* Only the set -> clear transition changes the page count. */
        if (cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
                                          MIGRATION_DIRTY_FLAG)) {
            ram_list.dirty_pages--;
        }
    }

    return ram_list.phys_dirty[page] &= ~dirty_flags;
}
102 |
|
103 |
/* Set @dirty_flags on every page overlapping [start, start + length). */
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                       ram_addr_t length,
                                                       int dirty_flags)
{
    ram_addr_t limit = TARGET_PAGE_ALIGN(start + length);
    ram_addr_t page;

    for (page = start & TARGET_PAGE_MASK; page < limit;
         page += TARGET_PAGE_SIZE) {
        cpu_physical_memory_set_dirty_flags(page, dirty_flags);
    }
}
115 |
|
116 |
/* Clear @dirty_flags on every page overlapping [start, start + length). */
static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                        ram_addr_t length,
                                                        int dirty_flags)
{
    ram_addr_t limit = TARGET_PAGE_ALIGN(start + length);
    ram_addr_t page;

    for (page = start & TARGET_PAGE_MASK; page < limit;
         page += TARGET_PAGE_SIZE) {
        cpu_physical_memory_clear_dirty_flags(page, dirty_flags);
    }
}
128 |
|
129 |
/* Clear @dirty_flags over [start, end) and perform any associated TLB/dirty
 * bookkeeping; implemented in exec.c — TODO confirm against the definition. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);

/* IORange callback table shared with memory.c (defined elsewhere). */
extern const IORangeOps memory_region_iorange_ops;
133 |
|
134 |
#endif
|
135 |
|
136 |
#endif
|