root / cpu-common.h @ 6774e44a
History | View | Annotate | Download (6.2 kB)
1 | 1ad2134f | Paul Brook | #ifndef CPU_COMMON_H
|
---|---|---|---|
2 | 1ad2134f | Paul Brook | #define CPU_COMMON_H 1 |
3 | 1ad2134f | Paul Brook | |
4 | 1ad2134f | Paul Brook | /* CPU interfaces that are target indpendent. */
|
5 | 1ad2134f | Paul Brook | |
6 | 477ba620 | Aurelien Jarno | #if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
|
7 | 1ad2134f | Paul Brook | #define WORDS_ALIGNED
|
8 | 1ad2134f | Paul Brook | #endif
|
9 | 1ad2134f | Paul Brook | |
10 | 37b76cfd | Paolo Bonzini | #ifdef TARGET_PHYS_ADDR_BITS
|
11 | 37b76cfd | Paolo Bonzini | #include "targphys.h" |
12 | 37b76cfd | Paolo Bonzini | #endif
|
13 | 37b76cfd | Paolo Bonzini | |
14 | 37b76cfd | Paolo Bonzini | #ifndef NEED_CPU_H
|
15 | 37b76cfd | Paolo Bonzini | #include "poison.h" |
16 | 37b76cfd | Paolo Bonzini | #endif
|
17 | 37b76cfd | Paolo Bonzini | |
18 | 1ad2134f | Paul Brook | #include "bswap.h" |
19 | f6f3fbca | Michael S. Tsirkin | #include "qemu-queue.h" |
20 | 1ad2134f | Paul Brook | |
#if !defined(CONFIG_USER_ONLY)

/* Byte order of a memory-mapped device's register bank, as passed to
 * cpu_register_io_memory().  Do not reorder: the enumerator values are
 * part of the ABI of that call. */
enum device_endian {
    DEVICE_NATIVE_ENDIAN,   /* same byte order as the target CPU */
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

/* address in the RAM (different from a physical address) */
typedef unsigned long ram_addr_t;
31 | 1ad2134f | Paul Brook | |
/* memory API */

/* Callback signatures for MMIO handlers registered via
 * cpu_register_io_memory(): @opaque is the device state pointer supplied
 * at registration time. */
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

/* Install [start_addr, start_addr + size) into the physical memory map,
 * backed by @phys_offset.  @log_dirty requests dirty-tracking for the
 * range at registration time. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty);
42 | 0fd542fb | Michael S. Tsirkin | |
43 | 0fd542fb | Michael S. Tsirkin | static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr, |
44 | 0fd542fb | Michael S. Tsirkin | ram_addr_t size, |
45 | 0fd542fb | Michael S. Tsirkin | ram_addr_t phys_offset, |
46 | 0fd542fb | Michael S. Tsirkin | ram_addr_t region_offset) |
47 | 0fd542fb | Michael S. Tsirkin | { |
48 | 0fd542fb | Michael S. Tsirkin | cpu_register_physical_memory_log(start_addr, size, phys_offset, |
49 | 0fd542fb | Michael S. Tsirkin | region_offset, false);
|
50 | 0fd542fb | Michael S. Tsirkin | } |
51 | 0fd542fb | Michael S. Tsirkin | |
52 | c227f099 | Anthony Liguori | static inline void cpu_register_physical_memory(target_phys_addr_t start_addr, |
53 | c227f099 | Anthony Liguori | ram_addr_t size, |
54 | c227f099 | Anthony Liguori | ram_addr_t phys_offset) |
55 | 1ad2134f | Paul Brook | { |
56 | 1ad2134f | Paul Brook | cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
|
57 | 1ad2134f | Paul Brook | } |
58 | 1ad2134f | Paul Brook | |
/* Look up the phys_offset/flags descriptor for the page containing @addr. */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
/* Register @size bytes of guest RAM backed by caller-provided host memory
 * @host; @dev/@name identify the block.  Returns its ram_addr_t. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                        ram_addr_t size, void *host);
/* As above, but the host backing memory is allocated internally. */
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
/* NOTE(review): presumably re-mmaps the host backing of a RAM range
 * (e.g. after memory failure) — confirm against exec.c. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
/* This should not be used by devices. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
/* As qemu_ram_addr_from_host(), but aborts instead of returning failure. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
73 | 1ad2134f | Paul Brook | |
/* Register MMIO handler tables (@mem_read/@mem_write hold one callback per
 * access size) with the given device endianness.  Returns an io-memory
 * index; release it with cpu_unregister_io_memory(). */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian);
void cpu_unregister_io_memory(int table_address);

/* Copy @len bytes between @buf and guest physical memory at @addr;
 * @is_write selects the direction (non-zero: buf -> guest). */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
81 | c227f099 | Anthony Liguori | static inline void cpu_physical_memory_read(target_phys_addr_t addr, |
82 | 3bad9814 | Stefan Weil | void *buf, int len) |
83 | 1ad2134f | Paul Brook | { |
84 | 1ad2134f | Paul Brook | cpu_physical_memory_rw(addr, buf, len, 0);
|
85 | 1ad2134f | Paul Brook | } |
86 | c227f099 | Anthony Liguori | static inline void cpu_physical_memory_write(target_phys_addr_t addr, |
87 | 3bad9814 | Stefan Weil | const void *buf, int len) |
88 | 1ad2134f | Paul Brook | { |
89 | 3bad9814 | Stefan Weil | cpu_physical_memory_rw(addr, (void *)buf, len, 1); |
90 | 1ad2134f | Paul Brook | } |
/* Map guest physical range [addr, addr + *plen) into host address space
 * for direct access; on return *plen holds the length actually mapped.
 * Pair with cpu_physical_memory_unmap(), passing the bytes actually
 * accessed as @access_len. */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
/* Register a callback invoked when map resources become available again;
 * returns an opaque cookie for cpu_unregister_map_client(). */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
98 | 1ad2134f | Paul Brook | |
/* Callback interface for components that track changes to the guest
 * physical memory map and its dirty state.  Register/unregister with the
 * functions below; instances are linked through @list. */
struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    /* A physical range was (re)registered; mirrors the arguments of
     * cpu_register_physical_memory_log(). */
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    /* Synchronize the dirty bitmap for [start_addr, end_addr]. */
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    /* Enable (non-zero) or disable migration dirty logging. */
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    /* Start/stop dirty logging for a specific physical range. */
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    /* Linkage on the global client list. */
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};

void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
121 | f6f3fbca | Michael S. Tsirkin | |
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/* Flush any writes batched so far to the device handlers. */
void qemu_flush_coalesced_mmio_buffer(void);
132 | 6842a08e | Blue Swirl | |
/* Load/store helpers on guest physical addresses: b = 8-bit, (u)w = 16-bit,
 * l = 32-bit, q = 64-bit. */
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
/* NOTE(review): _notdirty variants presumably store without marking the
 * page dirty — confirm against the definitions in exec.c. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

/* Write into memory even if the target range is registered as ROM. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
146 | 1ad2134f | Paul Brook | |
/* io-memory indices are stored shifted left by IO_MEM_SHIFT, leaving the
 * low bits free for per-page flags (IO_MEM_ROMD, IO_MEM_SUBPAGE below). */
#define IO_MEM_SHIFT 3

#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)

#endif /* !CONFIG_USER_ONLY */

#endif /* !CPU_COMMON_H */