/* cpu-common.h @ 67d95c15 */
#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent.  */

#ifdef TARGET_PHYS_ADDR_BITS
#include "targphys.h"
#endif

#ifndef NEED_CPU_H
#include "poison.h"
#endif

#include "bswap.h"
#include "qemu-queue.h"

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
typedef uint64_t ram_addr_t;
#  define RAM_ADDR_MAX UINT64_MAX
#  define RAM_ADDR_FMT "%" PRIx64
#else
typedef unsigned long ram_addr_t;
#  define RAM_ADDR_MAX ULONG_MAX
#  define RAM_ADDR_FMT "%lx"
#endif
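
/* Illustrative note (not part of the original header): RAM_ADDR_FMT is the
 * printf-style conversion that matches whichever type ram_addr_t resolves to
 * above, so RAM offsets can be logged portably, e.g.:
 *
 *     ram_addr_t offset = 0x200000;
 *     fprintf(stderr, "ram offset = " RAM_ADDR_FMT "\n", offset);
 */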

/* memory API */

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device.  */
void *qemu_get_ram_ptr(ram_addr_t addr);
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices.  */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
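
/* Usage sketch (illustrative only, not part of the original header): a device
 * that owns a RAM block can temporarily obtain a host pointer for it and later
 * translate a host pointer back to a RAM offset.  "block_offset" and "len" are
 * hypothetical caller-supplied values, and a return value of 0 from
 * qemu_ram_addr_from_host() is assumed to indicate success.
 *
 *     void *host = qemu_get_ram_ptr(block_offset);
 *     memset(host, 0, len);              // touch guest RAM directly
 *     qemu_put_ram_ptr(host);            // release the pointer when done
 *
 *     ram_addr_t back;
 *     if (qemu_ram_addr_from_host(host, &back) == 0) {
 *         // "back" now holds the RAM offset corresponding to "host"
 *     }
 */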

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            void *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const void *buf, int len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, 1);
}
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
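
/* Usage sketch (illustrative only, not part of the original header): the
 * map/unmap pair gives direct access to a guest-physical range when possible;
 * the mapped length may come back smaller than requested, and the copying
 * cpu_physical_memory_rw() path remains the fallback when mapping fails.
 * "gpa" is a hypothetical caller-supplied guest physical address.
 *
 *     target_phys_addr_t want = 4096, got = want;
 *     void *p = cpu_physical_memory_map(gpa, &got, 1);   // 1 = for writing
 *     if (p) {
 *         memset(p, 0, got);                              // fill what we got
 *         cpu_physical_memory_unmap(p, got, 1, got);      // report bytes touched
 *     } else {
 *         uint8_t zeroes[16] = {0};
 *         cpu_physical_memory_write(gpa, zeroes, sizeof(zeroes));
 *     }
 */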

struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
struct CPUPhysMemoryClient {
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};

void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
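
/* Usage sketch (illustrative only, not part of the original header): a
 * subsystem that needs to track guest memory mappings fills in a
 * CPUPhysMemoryClient and registers it.  The names below are hypothetical,
 * and depending on how the core iterates over registered clients, the
 * remaining callbacks may also need (possibly empty) implementations.
 *
 *     static void my_set_memory(CPUPhysMemoryClient *client,
 *                               target_phys_addr_t start_addr,
 *                               ram_addr_t size,
 *                               ram_addr_t phys_offset,
 *                               bool log_dirty)
 *     {
 *         // react to the mapping change, e.g. update an internal map
 *     }
 *
 *     static CPUPhysMemoryClient my_client = {
 *         .set_memory = my_set_memory,
 *     };
 *
 *     cpu_register_phys_memory_client(&my_client);
 */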

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching, which can have a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);

uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_le_phys(target_phys_addr_t addr);
uint32_t lduw_be_phys(target_phys_addr_t addr);
uint32_t ldl_le_phys(target_phys_addr_t addr);
uint32_t ldl_be_phys(target_phys_addr_t addr);
uint64_t ldq_le_phys(target_phys_addr_t addr);
uint64_t ldq_be_phys(target_phys_addr_t addr);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_le_phys(target_phys_addr_t addr, uint32_t val);
void stw_be_phys(target_phys_addr_t addr, uint32_t val);
void stl_le_phys(target_phys_addr_t addr, uint32_t val);
void stl_be_phys(target_phys_addr_t addr, uint32_t val);
void stq_le_phys(target_phys_addr_t addr, uint64_t val);
void stq_be_phys(target_phys_addr_t addr, uint64_t val);
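
/* Usage sketch (illustrative only, not part of the original header): the
 * ld*_phys / st*_phys helpers read and write guest physical memory with an
 * explicit width and endianness, e.g. updating a 32-bit little-endian field
 * of a guest-resident structure at a hypothetical address "desc_gpa".
 *
 *     uint32_t flags = ldl_le_phys(desc_gpa);
 *     stl_le_phys(desc_gpa, flags | 1);
 */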

#ifdef NEED_CPU_H
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);
#endif

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);

#define IO_MEM_SHIFT       3

#define IO_MEM_RAM         (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM         (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED  (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY    (3 << IO_MEM_SHIFT)
#define IO_MEM_SUBPAGE_RAM (4 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written.  */
#define IO_MEM_ROMD        (1)
#define IO_MEM_SUBPAGE     (2)

#endif /* !CONFIG_USER_ONLY */

#endif /* !CPU_COMMON_H */