root / cpu-common.h @ a0a3167a
History | View | Annotate | Download (6.4 kB)
1 | 1ad2134f | Paul Brook | #ifndef CPU_COMMON_H
|
---|---|---|---|
2 | 1ad2134f | Paul Brook | #define CPU_COMMON_H 1 |
3 | 1ad2134f | Paul Brook | |
4 | 1ad2134f | Paul Brook | /* CPU interfaces that are target independent. */
|
5 | 1ad2134f | Paul Brook | |
6 | 477ba620 | Aurelien Jarno | #if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
|
7 | 1ad2134f | Paul Brook | #define WORDS_ALIGNED
|
8 | 1ad2134f | Paul Brook | #endif
|
9 | 1ad2134f | Paul Brook | |
10 | 37b76cfd | Paolo Bonzini | #ifdef TARGET_PHYS_ADDR_BITS
|
11 | 37b76cfd | Paolo Bonzini | #include "targphys.h" |
12 | 37b76cfd | Paolo Bonzini | #endif
|
13 | 37b76cfd | Paolo Bonzini | |
14 | 37b76cfd | Paolo Bonzini | #ifndef NEED_CPU_H
|
15 | 37b76cfd | Paolo Bonzini | #include "poison.h" |
16 | 37b76cfd | Paolo Bonzini | #endif
|
17 | 37b76cfd | Paolo Bonzini | |
18 | 1ad2134f | Paul Brook | #include "bswap.h" |
19 | f6f3fbca | Michael S. Tsirkin | #include "qemu-queue.h" |
20 | 1ad2134f | Paul Brook | |
21 | b3755a91 | Paul Brook | #if !defined(CONFIG_USER_ONLY)
|
22 | b3755a91 | Paul Brook | |
/* Endianness with which a device's MMIO callbacks expect data.
 * Values are made explicit here; they match the original implicit
 * enumeration order (0, 1, 2). */
enum device_endian {
    DEVICE_NATIVE_ENDIAN = 0,   /* same endianness as the target CPU */
    DEVICE_BIG_ENDIAN    = 1,
    DEVICE_LITTLE_ENDIAN = 2,
};
28 | dd310534 | Alexander Graf | |
29 | 1ad2134f | Paul Brook | /* address in the RAM (different from a physical address) */
|
30 | c227f099 | Anthony Liguori | typedef unsigned long ram_addr_t; |
31 | 1ad2134f | Paul Brook | |
32 | 1ad2134f | Paul Brook | /* memory API */
|
33 | 1ad2134f | Paul Brook | |
34 | c227f099 | Anthony Liguori | typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value); |
35 | c227f099 | Anthony Liguori | typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr); |
36 | 1ad2134f | Paul Brook | |
37 | 0fd542fb | Michael S. Tsirkin | void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
|
38 | 0fd542fb | Michael S. Tsirkin | ram_addr_t size, |
39 | 0fd542fb | Michael S. Tsirkin | ram_addr_t phys_offset, |
40 | 0fd542fb | Michael S. Tsirkin | ram_addr_t region_offset, |
41 | 0fd542fb | Michael S. Tsirkin | bool log_dirty);
|
42 | 0fd542fb | Michael S. Tsirkin | |
43 | 0fd542fb | Michael S. Tsirkin | static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr, |
44 | 0fd542fb | Michael S. Tsirkin | ram_addr_t size, |
45 | 0fd542fb | Michael S. Tsirkin | ram_addr_t phys_offset, |
46 | 0fd542fb | Michael S. Tsirkin | ram_addr_t region_offset) |
47 | 0fd542fb | Michael S. Tsirkin | { |
48 | 0fd542fb | Michael S. Tsirkin | cpu_register_physical_memory_log(start_addr, size, phys_offset, |
49 | 0fd542fb | Michael S. Tsirkin | region_offset, false);
|
50 | 0fd542fb | Michael S. Tsirkin | } |
51 | 0fd542fb | Michael S. Tsirkin | |
52 | c227f099 | Anthony Liguori | static inline void cpu_register_physical_memory(target_phys_addr_t start_addr, |
53 | c227f099 | Anthony Liguori | ram_addr_t size, |
54 | c227f099 | Anthony Liguori | ram_addr_t phys_offset) |
55 | 1ad2134f | Paul Brook | { |
56 | 1ad2134f | Paul Brook | cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
|
57 | 1ad2134f | Paul Brook | } |
58 | 1ad2134f | Paul Brook | |
59 | c227f099 | Anthony Liguori | ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr); |
60 | 84b89d78 | Cam Macdonell | ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name, |
61 | 84b89d78 | Cam Macdonell | ram_addr_t size, void *host);
|
62 | 1724f049 | Alex Williamson | ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size); |
63 | c227f099 | Anthony Liguori | void qemu_ram_free(ram_addr_t addr);
|
64 | 1f2e98b6 | Alex Williamson | void qemu_ram_free_from_ptr(ram_addr_t addr);
|
65 | cd19cfa2 | Huang Ying | void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
|
66 | 1ad2134f | Paul Brook | /* This should only be used for ram local to a device. */
|
67 | c227f099 | Anthony Liguori | void *qemu_get_ram_ptr(ram_addr_t addr);
|
68 | 38bee5dc | Stefano Stabellini | void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size);
|
69 | b2e0a138 | Michael S. Tsirkin | /* Same but slower, to use for migration, where the order of
|
70 | b2e0a138 | Michael S. Tsirkin | * RAMBlocks must not change. */
|
71 | b2e0a138 | Michael S. Tsirkin | void *qemu_safe_ram_ptr(ram_addr_t addr);
|
72 | 050a0ddf | Anthony PERARD | void qemu_put_ram_ptr(void *addr); |
73 | 1ad2134f | Paul Brook | /* This should not be used by devices. */
|
74 | e890261f | Marcelo Tosatti | int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr); |
75 | e890261f | Marcelo Tosatti | ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
|
76 | 1ad2134f | Paul Brook | |
77 | d60efc6b | Blue Swirl | int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read, |
78 | d60efc6b | Blue Swirl | CPUWriteMemoryFunc * const *mem_write,
|
79 | dd310534 | Alexander Graf | void *opaque, enum device_endian endian); |
80 | 1ad2134f | Paul Brook | void cpu_unregister_io_memory(int table_address); |
81 | 1ad2134f | Paul Brook | |
82 | c227f099 | Anthony Liguori | void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
|
83 | 1ad2134f | Paul Brook | int len, int is_write); |
84 | c227f099 | Anthony Liguori | static inline void cpu_physical_memory_read(target_phys_addr_t addr, |
85 | 3bad9814 | Stefan Weil | void *buf, int len) |
86 | 1ad2134f | Paul Brook | { |
87 | 1ad2134f | Paul Brook | cpu_physical_memory_rw(addr, buf, len, 0);
|
88 | 1ad2134f | Paul Brook | } |
89 | c227f099 | Anthony Liguori | static inline void cpu_physical_memory_write(target_phys_addr_t addr, |
90 | 3bad9814 | Stefan Weil | const void *buf, int len) |
91 | 1ad2134f | Paul Brook | { |
92 | 3bad9814 | Stefan Weil | cpu_physical_memory_rw(addr, (void *)buf, len, 1); |
93 | 1ad2134f | Paul Brook | } |
94 | c227f099 | Anthony Liguori | void *cpu_physical_memory_map(target_phys_addr_t addr,
|
95 | c227f099 | Anthony Liguori | target_phys_addr_t *plen, |
96 | 1ad2134f | Paul Brook | int is_write);
|
97 | c227f099 | Anthony Liguori | void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len, |
98 | c227f099 | Anthony Liguori | int is_write, target_phys_addr_t access_len);
|
99 | 1ad2134f | Paul Brook | void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque)); |
100 | 1ad2134f | Paul Brook | void cpu_unregister_map_client(void *cookie); |
101 | 1ad2134f | Paul Brook | |
102 | f6f3fbca | Michael S. Tsirkin | struct CPUPhysMemoryClient;
|
103 | f6f3fbca | Michael S. Tsirkin | typedef struct CPUPhysMemoryClient CPUPhysMemoryClient; |
104 | f6f3fbca | Michael S. Tsirkin | struct CPUPhysMemoryClient {
|
105 | f6f3fbca | Michael S. Tsirkin | void (*set_memory)(struct CPUPhysMemoryClient *client, |
106 | f6f3fbca | Michael S. Tsirkin | target_phys_addr_t start_addr, |
107 | f6f3fbca | Michael S. Tsirkin | ram_addr_t size, |
108 | 0fd542fb | Michael S. Tsirkin | ram_addr_t phys_offset, |
109 | 0fd542fb | Michael S. Tsirkin | bool log_dirty);
|
110 | f6f3fbca | Michael S. Tsirkin | int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client, |
111 | f6f3fbca | Michael S. Tsirkin | target_phys_addr_t start_addr, |
112 | f6f3fbca | Michael S. Tsirkin | target_phys_addr_t end_addr); |
113 | f6f3fbca | Michael S. Tsirkin | int (*migration_log)(struct CPUPhysMemoryClient *client, |
114 | f6f3fbca | Michael S. Tsirkin | int enable);
|
115 | e5896b12 | Anthony PERARD | int (*log_start)(struct CPUPhysMemoryClient *client, |
116 | e5896b12 | Anthony PERARD | target_phys_addr_t phys_addr, ram_addr_t size); |
117 | e5896b12 | Anthony PERARD | int (*log_stop)(struct CPUPhysMemoryClient *client, |
118 | e5896b12 | Anthony PERARD | target_phys_addr_t phys_addr, ram_addr_t size); |
119 | f6f3fbca | Michael S. Tsirkin | QLIST_ENTRY(CPUPhysMemoryClient) list; |
120 | f6f3fbca | Michael S. Tsirkin | }; |
121 | f6f3fbca | Michael S. Tsirkin | |
122 | f6f3fbca | Michael S. Tsirkin | void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
|
123 | f6f3fbca | Michael S. Tsirkin | void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
|
124 | f6f3fbca | Michael S. Tsirkin | |
125 | 6842a08e | Blue Swirl | /* Coalesced MMIO regions are areas where write operations can be reordered.
|
126 | 6842a08e | Blue Swirl | * This usually implies that write operations are side-effect free. This allows
|
127 | 6842a08e | Blue Swirl | * batching which can make a major impact on performance when using
|
128 | 6842a08e | Blue Swirl | * virtualization.
|
129 | 6842a08e | Blue Swirl | */
|
130 | 6842a08e | Blue Swirl | void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
|
131 | 6842a08e | Blue Swirl | |
132 | 6842a08e | Blue Swirl | void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
|
133 | 6842a08e | Blue Swirl | |
134 | 6842a08e | Blue Swirl | void qemu_flush_coalesced_mmio_buffer(void); |
135 | 6842a08e | Blue Swirl | |
136 | c227f099 | Anthony Liguori | uint32_t ldub_phys(target_phys_addr_t addr); |
137 | c227f099 | Anthony Liguori | uint32_t lduw_phys(target_phys_addr_t addr); |
138 | c227f099 | Anthony Liguori | uint32_t ldl_phys(target_phys_addr_t addr); |
139 | c227f099 | Anthony Liguori | uint64_t ldq_phys(target_phys_addr_t addr); |
140 | c227f099 | Anthony Liguori | void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
|
141 | c227f099 | Anthony Liguori | void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
|
142 | c227f099 | Anthony Liguori | void stb_phys(target_phys_addr_t addr, uint32_t val);
|
143 | c227f099 | Anthony Liguori | void stw_phys(target_phys_addr_t addr, uint32_t val);
|
144 | c227f099 | Anthony Liguori | void stl_phys(target_phys_addr_t addr, uint32_t val);
|
145 | c227f099 | Anthony Liguori | void stq_phys(target_phys_addr_t addr, uint64_t val);
|
146 | c227f099 | Anthony Liguori | |
147 | c227f099 | Anthony Liguori | void cpu_physical_memory_write_rom(target_phys_addr_t addr,
|
148 | 1ad2134f | Paul Brook | const uint8_t *buf, int len); |
149 | 1ad2134f | Paul Brook | |
150 | 1ad2134f | Paul Brook | #define IO_MEM_SHIFT 3 |
151 | 1ad2134f | Paul Brook | |
152 | 1ad2134f | Paul Brook | #define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */ |
153 | 1ad2134f | Paul Brook | #define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */ |
154 | 1ad2134f | Paul Brook | #define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT) |
155 | 1ad2134f | Paul Brook | #define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT) |
156 | 1ad2134f | Paul Brook | |
157 | 1ad2134f | Paul Brook | /* Acts like a ROM when read and like a device when written. */
|
158 | 1ad2134f | Paul Brook | #define IO_MEM_ROMD (1) |
159 | 1ad2134f | Paul Brook | #define IO_MEM_SUBPAGE (2) |
160 | 1ad2134f | Paul Brook | |
161 | b3755a91 | Paul Brook | #endif
|
162 | b3755a91 | Paul Brook | |
163 | 1ad2134f | Paul Brook | #endif /* !CPU_COMMON_H */ |