#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent.  */

#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
#define WORDS_ALIGNED
#endif

#ifdef TARGET_PHYS_ADDR_BITS
#include "targphys.h"
#endif

#ifndef NEED_CPU_H
#include "poison.h"
#endif

#include "bswap.h"
#include "qemu-queue.h"

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

/* address in the RAM (different from a physical address) */
typedef unsigned long ram_addr_t;

/* memory API */

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty);

static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                                       ram_addr_t size,
                                                       ram_addr_t phys_offset,
                                                       ram_addr_t region_offset)
{
    cpu_register_physical_memory_log(start_addr, size, phys_offset,
                                     region_offset, false);
}

static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}
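
/* Illustrative usage sketch, not part of the original header: board code
 * commonly allocates guest RAM with qemu_ram_alloc() (declared below) and
 * then maps it at a guest physical address.  The block name and ram_size
 * used here are hypothetical.
 *
 *   ram_addr_t ram_offset = qemu_ram_alloc(NULL, "board.ram", ram_size);
 *   cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
 */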

ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host);
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device.  */
void *qemu_get_ram_ptr(ram_addr_t addr);
void *qemu_ram_ptr_length(target_phys_addr_t addr, target_phys_addr_t *size);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices.  */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
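
/* Illustrative sketch, not part of the original header: a device with its
 * own RAM block might allocate it and use qemu_get_ram_ptr() to obtain a
 * host pointer into it.  The device state dev, block name and vram_size
 * are hypothetical.
 *
 *   ram_addr_t vram_offset = qemu_ram_alloc(&dev->qdev, "mydev.vram",
 *                                           vram_size);
 *   uint8_t *vram = qemu_get_ram_ptr(vram_offset);
 *   memset(vram, 0, vram_size);
 */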

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian);
void cpu_unregister_io_memory(int table_address);
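
/* Illustrative sketch, not part of the original header: a device registers
 * byte/word/long read and write handlers and maps the returned io index
 * into the guest physical address space.  The my_* handlers, the opaque
 * state s, and base are hypothetical.
 *
 *   static CPUReadMemoryFunc * const my_read[3] = {
 *       my_readb, my_readw, my_readl,
 *   };
 *   static CPUWriteMemoryFunc * const my_write[3] = {
 *       my_writeb, my_writew, my_writel,
 *   };
 *
 *   int io = cpu_register_io_memory(my_read, my_write, s,
 *                                   DEVICE_NATIVE_ENDIAN);
 *   cpu_register_physical_memory(base, 0x1000, io);
 */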

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            void *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const void *buf, int len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
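
/* Illustrative sketch, not part of the original header: the usual DMA
 * pattern is to map a guest physical range, access the returned host
 * pointer, and unmap it again, reporting how much was actually accessed.
 * addr, len and data are hypothetical.
 *
 *   target_phys_addr_t plen = len;
 *   void *p = cpu_physical_memory_map(addr, &plen, 1);
 *   if (p) {
 *       memcpy(p, data, plen);
 *       cpu_physical_memory_unmap(p, plen, 1, plen);
 *   }
 */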

struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};

void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
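
/* Illustrative sketch, not part of the original header: a subsystem that
 * needs to track changes to the physical memory map implements the hooks
 * above and registers itself.  The my_* callbacks are hypothetical.
 *
 *   static CPUPhysMemoryClient my_client = {
 *       .set_memory        = my_set_memory,
 *       .sync_dirty_bitmap = my_sync_dirty_bitmap,
 *       .migration_log     = my_migration_log,
 *       .log_start         = my_log_start,
 *       .log_stop          = my_log_stop,
 *   };
 *
 *   cpu_register_phys_memory_client(&my_client);
 */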

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

void qemu_flush_coalesced_mmio_buffer(void);
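
/* Illustrative sketch, not part of the original header: a device whose MMIO
 * writes are side-effect free can mark the region as coalesced after
 * registering it, and flush pending writes before it needs to observe a
 * consistent state.  base and size are hypothetical.
 *
 *   qemu_register_coalesced_mmio(base, size);
 *   ...
 *   qemu_flush_coalesced_mmio_buffer();
 */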

uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t lduw_le_phys(target_phys_addr_t addr);
uint32_t lduw_be_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint32_t ldl_le_phys(target_phys_addr_t addr);
uint32_t ldl_be_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
uint64_t ldq_le_phys(target_phys_addr_t addr);
uint64_t ldq_be_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stw_le_phys(target_phys_addr_t addr, uint32_t val);
void stw_be_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stl_le_phys(target_phys_addr_t addr, uint32_t val);
void stl_be_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);
void stq_le_phys(target_phys_addr_t addr, uint64_t val);
void stq_be_phys(target_phys_addr_t addr, uint64_t val);
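
/* Illustrative sketch, not part of the original header: read a 32-bit
 * little-endian value from guest physical memory, set a bit, and write it
 * back.  The address is hypothetical.
 *
 *   uint32_t val = ldl_le_phys(0x1000);
 *   stl_le_phys(0x1000, val | 1);
 */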

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);

#define IO_MEM_SHIFT       3

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written.  */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)
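
/* Illustrative sketch, not part of the original header: the phys_offset
 * passed to cpu_register_physical_memory() combines a RAM offset or a
 * value returned by cpu_register_io_memory() with these flags.  For
 * example, flash that reads like ROM but traps writes to a device model
 * (ROMD) might be registered as below; base, size, rom_offset and io_index
 * are hypothetical.
 *
 *   cpu_register_physical_memory(base, size,
 *                                rom_offset | io_index | IO_MEM_ROMD);
 */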

#endif /* !CONFIG_USER_ONLY */

#endif /* !CPU_COMMON_H */