root / include / block / block.h @ 212a5a8f
History | View | Annotate | Download (22.1 kB)
1 | faf07963 | pbrook | #ifndef BLOCK_H
|
---|---|---|---|
2 | faf07963 | pbrook | #define BLOCK_H
|
3 | faf07963 | pbrook | |
4 | 737e150e | Paolo Bonzini | #include "block/aio.h" |
5 | 3b69e4b9 | aliguori | #include "qemu-common.h" |
6 | 1de7afc9 | Paolo Bonzini | #include "qemu/option.h" |
7 | 737e150e | Paolo Bonzini | #include "block/coroutine.h" |
8 | 7b1b5d19 | Paolo Bonzini | #include "qapi/qmp/qobject.h" |
9 | 2f0c9fe6 | Paolo Bonzini | #include "qapi-types.h" |
10 | a76bab49 | aliguori | |
11 | faf07963 | pbrook | /* block.c */
|
12 | faf07963 | pbrook | typedef struct BlockDriver BlockDriver; |
13 | 2f0c9fe6 | Paolo Bonzini | typedef struct BlockJob BlockJob; |
14 | faf07963 | pbrook | |
/* Static information about an open image, filled in by bdrv_get_info(). */
typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
    /*
     * True if unallocated blocks read back as zeroes. This is equivalent
     * to the LBPRZ flag in the SCSI logical block provisioning page.
     */
    bool unallocated_blocks_are_zero;
    /*
     * True if the driver can optimize writing zeroes by unmapping
     * sectors. This is equivalent to the BLKDISCARDZEROES ioctl in Linux
     * with the difference that in qemu a discard is allowed to silently
     * fail. Therefore we have to use bdrv_write_zeroes with the
     * BDRV_REQ_MAY_UNMAP flag for an optimized zero write with unmapping.
     * After this call the driver has to guarantee that the contents read
     * back as zero. It is additionally required that the block device is
     * opened with BDRV_O_UNMAP flag for this to work.
     */
    bool can_write_zeroes_with_unmap;
} BlockDriverInfo;
38 | faf07963 | pbrook | |
/*
 * Cluster allocation/fragmentation statistics, reported as part of an
 * image consistency check (see BdrvCheckResult.bfi).
 */
typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;
45 | f8111c24 | Dong Xu Wang | |
46 | 145feb17 | Markus Armbruster | /* Callbacks for block device models */
|
/* Callbacks for block device models; registered with bdrv_set_dev_ops(). */
typedef struct BlockDevOps {
    /*
     * Runs when virtual media changed (monitor commands eject, change)
     * Argument load is true on load and false on eject.
     * Beware: doesn't run when a host device's physical media
     * changes.  Sure would be useful if it did.
     * Device models with removable media must implement this callback.
     */
    void (*change_media_cb)(void *opaque, bool load);
    /*
     * Runs when an eject request is issued from the monitor, the tray
     * is closed, and the medium is locked.
     * Device models that do not implement is_medium_locked will not need
     * this callback.  Device models that can lock the medium or tray might
     * want to implement the callback and unlock the tray when "force" is
     * true, even if they do not support eject requests.
     */
    void (*eject_request_cb)(void *opaque, bool force);
    /*
     * Is the virtual tray open?
     * Device models implement this only when the device has a tray.
     */
    bool (*is_tray_open)(void *opaque);
    /*
     * Is the virtual medium locked into the device?
     * Device models implement this only when device has such a lock.
     */
    bool (*is_medium_locked)(void *opaque);
    /*
     * Runs when the size changed (e.g. monitor command block_resize)
     */
    void (*resize_cb)(void *opaque);
} BlockDevOps;
80 | 0e49de52 | Markus Armbruster | |
/* Per-request flags accepted by the read/write/zero request functions. */
typedef enum {
    BDRV_REQ_COPY_ON_READ = 0x1,
    BDRV_REQ_ZERO_WRITE   = 0x2,
    /* The BDRV_REQ_MAY_UNMAP flag is used to indicate that the block driver
     * is allowed to optimize a write zeroes request by unmapping (discarding)
     * blocks if it is guaranteed that the result will read back as
     * zeroes. The flag is only passed to the driver if the block device is
     * opened with BDRV_O_UNMAP.
     */
    BDRV_REQ_MAY_UNMAP    = 0x4,
} BdrvRequestFlags;
92 | 6faac15f | Peter Lieven | |
/* Flags for bdrv_open() and friends (BlockDriverState open flags). */
#define BDRV_O_RDWR        0x0002
#define BDRV_O_SNAPSHOT    0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_NOCACHE     0x0020 /* do not use the host page cache */
#define BDRV_O_CACHE_WB    0x0040 /* use write-back caching */
#define BDRV_O_NATIVE_AIO  0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING  0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH    0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INCOMING    0x0800 /* consistency hint for incoming migration */
#define BDRV_O_CHECK       0x1000 /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR  0x2000 /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP       0x4000 /* execute guest UNMAP/TRIM operations */

/* The subset of open flags that select host caching behaviour. */
#define BDRV_O_CACHE_MASK  (BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH)
|
107 | faf07963 | pbrook | |
/* The block layer's sector unit: 1 << 9 = 512 bytes. */
#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)
111 | 6ea44308 | Jan Kiszka | |
/* BDRV_BLOCK_DATA: data is read from bs->file or another file
 * BDRV_BLOCK_ZERO: sectors read as zero
 * BDRV_BLOCK_OFFSET_VALID: sector stored in bs->file as raw data
 * BDRV_BLOCK_RAW: used internally to indicate that the request
 *                 was answered by the raw driver and that one
 *                 should look in bs->file directly.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, bits 9-62 represent the offset in
 * bs->file where sector data can be read from as raw data.
 *
 * DATA == 0 && ZERO == 0 means that data is read from backing_hd if present.
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, bs->file is zero at offset
 *  t    f        t       sectors read as valid from bs->file at offset
 *  f    t        t       sectors preallocated, read as zero, bs->file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        bs->file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         1
#define BDRV_BLOCK_ZERO         2
#define BDRV_BLOCK_OFFSET_VALID 4
#define BDRV_BLOCK_RAW          8
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK
|
141 | 4333bb71 | Paolo Bonzini | |
/* Action to take on an I/O error; returned by bdrv_get_error_action(). */
typedef enum {
    BDRV_ACTION_REPORT, BDRV_ACTION_IGNORE, BDRV_ACTION_STOP
} BlockErrorAction;
145 | 2582bfed | Luiz Capitulino | |
/* Queue of pending reopen requests, built up by bdrv_reopen_queue(). */
typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

/*
 * State carried across a reopen transaction:
 * bdrv_reopen_prepare() -> bdrv_reopen_commit()/bdrv_reopen_abort().
 */
typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;               /* the new open flags being applied */
    void *opaque;            /* NOTE(review): presumably driver-private state -- confirm */
} BDRVReopenState;
153 | e971aa12 | Jeff Cody | |
154 | e971aa12 | Jeff Cody | |
155 | 28a7282a | Luiz Capitulino | void bdrv_iostatus_enable(BlockDriverState *bs);
|
156 | 28a7282a | Luiz Capitulino | void bdrv_iostatus_reset(BlockDriverState *bs);
|
157 | 28a7282a | Luiz Capitulino | void bdrv_iostatus_disable(BlockDriverState *bs);
|
158 | 28a7282a | Luiz Capitulino | bool bdrv_iostatus_is_enabled(const BlockDriverState *bs); |
159 | 28a7282a | Luiz Capitulino | void bdrv_iostatus_set_err(BlockDriverState *bs, int error); |
160 | d15e5465 | Luiz Capitulino | void bdrv_info_print(Monitor *mon, const QObject *data); |
161 | d15e5465 | Luiz Capitulino | void bdrv_info(Monitor *mon, QObject **ret_data);
|
162 | 218a536a | Luiz Capitulino | void bdrv_stats_print(Monitor *mon, const QObject *data); |
163 | 218a536a | Luiz Capitulino | void bdrv_info_stats(Monitor *mon, QObject **ret_data);
|
164 | faf07963 | pbrook | |
165 | 0563e191 | Zhi Yong Wu | /* disk I/O throttling */
|
166 | 0563e191 | Zhi Yong Wu | void bdrv_io_limits_enable(BlockDriverState *bs);
|
167 | 98f90dba | Zhi Yong Wu | void bdrv_io_limits_disable(BlockDriverState *bs);
|
168 | 0563e191 | Zhi Yong Wu | |
169 | faf07963 | pbrook | void bdrv_init(void); |
170 | eb852011 | Markus Armbruster | void bdrv_init_with_whitelist(void); |
171 | 98289620 | Kevin Wolf | BlockDriver *bdrv_find_protocol(const char *filename, |
172 | 98289620 | Kevin Wolf | bool allow_protocol_prefix);
|
173 | faf07963 | pbrook | BlockDriver *bdrv_find_format(const char *format_name); |
174 | b64ec4e4 | Fam Zheng | BlockDriver *bdrv_find_whitelisted_format(const char *format_name, |
175 | b64ec4e4 | Fam Zheng | bool readonly);
|
176 | 0e7e1989 | Kevin Wolf | int bdrv_create(BlockDriver *drv, const char* filename, |
177 | cc84d90f | Max Reitz | QEMUOptionParameter *options, Error **errp); |
178 | cc84d90f | Max Reitz | int bdrv_create_file(const char* filename, QEMUOptionParameter *options, |
179 | cc84d90f | Max Reitz | Error **errp); |
180 | faf07963 | pbrook | BlockDriverState *bdrv_new(const char *device_name); |
181 | d22b2f41 | Ryan Harper | void bdrv_make_anon(BlockDriverState *bs);
|
182 | 4ddc07ca | Paolo Bonzini | void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old);
|
183 | 8802d1fd | Jeff Cody | void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
|
184 | c3993cdc | Stefan Hajnoczi | int bdrv_parse_cache_flags(const char *mode, int *flags); |
185 | 9e8f1835 | Paolo Bonzini | int bdrv_parse_discard_flags(const char *mode, int *flags); |
186 | 787e4a85 | Kevin Wolf | int bdrv_file_open(BlockDriverState **pbs, const char *filename, |
187 | 72daa72e | Max Reitz | const char *reference, QDict *options, int flags, |
188 | 72daa72e | Max Reitz | Error **errp); |
189 | da557aac | Max Reitz | int bdrv_open_image(BlockDriverState **pbs, const char *filename, |
190 | da557aac | Max Reitz | QDict *options, const char *bdref_key, int flags, |
191 | da557aac | Max Reitz | bool force_raw, bool allow_none, Error **errp); |
192 | 34b5d2c6 | Max Reitz | int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp);
|
193 | de9c0cec | Kevin Wolf | int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options, |
194 | 34b5d2c6 | Max Reitz | int flags, BlockDriver *drv, Error **errp);
|
195 | e971aa12 | Jeff Cody | BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue, |
196 | e971aa12 | Jeff Cody | BlockDriverState *bs, int flags);
|
197 | e971aa12 | Jeff Cody | int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
|
198 | e971aa12 | Jeff Cody | int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp); |
199 | e971aa12 | Jeff Cody | int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
|
200 | e971aa12 | Jeff Cody | BlockReopenQueue *queue, Error **errp); |
201 | e971aa12 | Jeff Cody | void bdrv_reopen_commit(BDRVReopenState *reopen_state);
|
202 | e971aa12 | Jeff Cody | void bdrv_reopen_abort(BDRVReopenState *reopen_state);
|
203 | faf07963 | pbrook | void bdrv_close(BlockDriverState *bs);
|
204 | d7d512f6 | Paolo Bonzini | void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify);
|
205 | fa879d62 | Markus Armbruster | int bdrv_attach_dev(BlockDriverState *bs, void *dev); |
206 | fa879d62 | Markus Armbruster | void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev); |
207 | fa879d62 | Markus Armbruster | void bdrv_detach_dev(BlockDriverState *bs, void *dev); |
208 | fa879d62 | Markus Armbruster | void *bdrv_get_attached_dev(BlockDriverState *bs);
|
209 | 0e49de52 | Markus Armbruster | void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops, |
210 | 0e49de52 | Markus Armbruster | void *opaque);
|
211 | 025ccaa7 | Paolo Bonzini | void bdrv_dev_eject_request(BlockDriverState *bs, bool force); |
212 | 2c6942fa | Markus Armbruster | bool bdrv_dev_has_removable_media(BlockDriverState *bs);
|
213 | e4def80b | Markus Armbruster | bool bdrv_dev_is_tray_open(BlockDriverState *bs);
|
214 | f107639a | Markus Armbruster | bool bdrv_dev_is_medium_locked(BlockDriverState *bs);
|
215 | faf07963 | pbrook | int bdrv_read(BlockDriverState *bs, int64_t sector_num,
|
216 | faf07963 | pbrook | uint8_t *buf, int nb_sectors);
|
217 | 07d27a44 | Markus Armbruster | int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
|
218 | 07d27a44 | Markus Armbruster | uint8_t *buf, int nb_sectors);
|
219 | faf07963 | pbrook | int bdrv_write(BlockDriverState *bs, int64_t sector_num,
|
220 | faf07963 | pbrook | const uint8_t *buf, int nb_sectors); |
221 | 4105eaaa | Peter Lieven | int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
|
222 | aa7bfbff | Peter Lieven | int nb_sectors, BdrvRequestFlags flags);
|
223 | d5ef94d4 | Paolo Bonzini | BlockDriverAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs, int64_t sector_num, |
224 | d5ef94d4 | Paolo Bonzini | int nb_sectors, BdrvRequestFlags flags,
|
225 | d5ef94d4 | Paolo Bonzini | BlockDriverCompletionFunc *cb, void *opaque);
|
226 | d75cbb5e | Peter Lieven | int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags);
|
227 | 8d3b1a2d | Kevin Wolf | int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov);
|
228 | faf07963 | pbrook | int bdrv_pread(BlockDriverState *bs, int64_t offset,
|
229 | faf07963 | pbrook | void *buf, int count); |
230 | faf07963 | pbrook | int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
|
231 | faf07963 | pbrook | const void *buf, int count); |
232 | 8d3b1a2d | Kevin Wolf | int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov);
|
233 | f08145fe | Kevin Wolf | int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
|
234 | f08145fe | Kevin Wolf | const void *buf, int count); |
235 | da1fa91d | Kevin Wolf | int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
|
236 | da1fa91d | Kevin Wolf | int nb_sectors, QEMUIOVector *qiov);
|
237 | 470c0504 | Stefan Hajnoczi | int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
|
238 | 470c0504 | Stefan Hajnoczi | int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
|
239 | da1fa91d | Kevin Wolf | int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
|
240 | da1fa91d | Kevin Wolf | int nb_sectors, QEMUIOVector *qiov);
|
241 | f08f2dda | Stefan Hajnoczi | /*
|
242 | f08f2dda | Stefan Hajnoczi | * Efficiently zero a region of the disk image. Note that this is a regular
|
243 | f08f2dda | Stefan Hajnoczi | * I/O request like read or write and should have a reasonable size. This
|
244 | f08f2dda | Stefan Hajnoczi | * function is not suitable for zeroing the entire image in a single request
|
245 | f08f2dda | Stefan Hajnoczi | * because it may allocate memory for the entire region.
|
246 | f08f2dda | Stefan Hajnoczi | */
|
247 | f08f2dda | Stefan Hajnoczi | int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
|
248 | aa7bfbff | Peter Lieven | int nb_sectors, BdrvRequestFlags flags);
|
249 | e8a6bb9c | Marcelo Tosatti | BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs, |
250 | e8a6bb9c | Marcelo Tosatti | const char *backing_file); |
251 | f198fd1c | Benoît Canet | int bdrv_get_backing_file_depth(BlockDriverState *bs);
|
252 | faf07963 | pbrook | int bdrv_truncate(BlockDriverState *bs, int64_t offset);
|
253 | faf07963 | pbrook | int64_t bdrv_getlength(BlockDriverState *bs); |
254 | 4a1d5e1f | Fam Zheng | int64_t bdrv_get_allocated_file_size(BlockDriverState *bs); |
255 | 96b8f136 | ths | void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
|
256 | faf07963 | pbrook | int bdrv_commit(BlockDriverState *bs);
|
257 | e8877497 | Stefan Hajnoczi | int bdrv_commit_all(void); |
258 | 756e6736 | Kevin Wolf | int bdrv_change_backing_file(BlockDriverState *bs,
|
259 | 756e6736 | Kevin Wolf | const char *backing_file, const char *backing_fmt); |
260 | 5efa9d5a | Anthony Liguori | void bdrv_register(BlockDriver *bdrv);
|
261 | 6ebdcee2 | Jeff Cody | int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
|
262 | 6ebdcee2 | Jeff Cody | BlockDriverState *base); |
263 | 6ebdcee2 | Jeff Cody | BlockDriverState *bdrv_find_overlay(BlockDriverState *active, |
264 | 6ebdcee2 | Jeff Cody | BlockDriverState *bs); |
265 | 79fac568 | Jeff Cody | BlockDriverState *bdrv_find_base(BlockDriverState *bs); |
266 | 5efa9d5a | Anthony Liguori | |
267 | e076f338 | Kevin Wolf | |
/* Result of an image consistency check (bdrv_check()). */
typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;       /* only meaningful when run with BDRV_FIX_ERRORS */
    int leaks_fixed;             /* only meaningful when run with BDRV_FIX_LEAKS */
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

/* Repair modes for bdrv_check(); may be OR'ed together. */
typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);
|
284 | e076f338 | Kevin Wolf | |
285 | 6f176b48 | Max Reitz | int bdrv_amend_options(BlockDriverState *bs_new, QEMUOptionParameter *options);
|
286 | 6f176b48 | Max Reitz | |
287 | f6186f49 | Benoît Canet | /* external snapshots */
|
288 | f6186f49 | Benoît Canet | |
/*
 * Per-BlockDriver authorization roles used by the filter/snapshot checks
 * below; BS_AUTHORIZATION_COUNT is the number of roles, not a role itself.
 */
typedef enum {
    BS_IS_A_FILTER,
    BS_FILTER_PASS_DOWN,
    BS_AUTHORIZATION_COUNT,
} BsAuthorization;
294 | f6186f49 | Benoît Canet | |
295 | 212a5a8f | Benoît Canet | bool bdrv_generic_is_first_non_filter(BlockDriverState *bs,
|
296 | 212a5a8f | Benoît Canet | BlockDriverState *candidate); |
297 | 212a5a8f | Benoît Canet | bool bdrv_recurse_is_first_non_filter(BlockDriverState *bs,
|
298 | 212a5a8f | Benoît Canet | BlockDriverState *candidate); |
299 | 212a5a8f | Benoît Canet | bool bdrv_is_first_non_filter(BlockDriverState *candidate);
|
300 | f6186f49 | Benoît Canet | |
301 | faf07963 | pbrook | /* async block I/O */
|
302 | 7cd1e32a | lirans@il.ibm.com | typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector, |
303 | 39aa9a12 | Devin Nakamura | int sector_num);
|
304 | 3b69e4b9 | aliguori | BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num, |
305 | 3b69e4b9 | aliguori | QEMUIOVector *iov, int nb_sectors,
|
306 | 3b69e4b9 | aliguori | BlockDriverCompletionFunc *cb, void *opaque);
|
307 | 3b69e4b9 | aliguori | BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num, |
308 | 3b69e4b9 | aliguori | QEMUIOVector *iov, int nb_sectors,
|
309 | 3b69e4b9 | aliguori | BlockDriverCompletionFunc *cb, void *opaque);
|
310 | b2e12bc6 | Christoph Hellwig | BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs, |
311 | 39aa9a12 | Devin Nakamura | BlockDriverCompletionFunc *cb, void *opaque);
|
312 | 4265d620 | Paolo Bonzini | BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs, |
313 | 4265d620 | Paolo Bonzini | int64_t sector_num, int nb_sectors,
|
314 | 4265d620 | Paolo Bonzini | BlockDriverCompletionFunc *cb, void *opaque);
|
315 | faf07963 | pbrook | void bdrv_aio_cancel(BlockDriverAIOCB *acb);
|
316 | faf07963 | pbrook | |
/* One request in a bdrv_aio_multiwrite() batch. */
typedef struct BlockRequest {
    /* Fields to be filled by multiwrite caller */
    int64_t sector;
    int nb_sectors;
    int flags;
    QEMUIOVector *qiov;
    BlockDriverCompletionFunc *cb;
    void *opaque;

    /* Filled by multiwrite implementation */
    int error;
} BlockRequest;
329 | 40b4f539 | Kevin Wolf | |
330 | 40b4f539 | Kevin Wolf | int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs,
|
331 | 40b4f539 | Kevin Wolf | int num_reqs);
|
332 | 40b4f539 | Kevin Wolf | |
333 | 7d780669 | aliguori | /* sg packet commands */
|
334 | 221f715d | aliguori | int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf); |
335 | 221f715d | aliguori | BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs, |
336 | 221f715d | aliguori | unsigned long int req, void *buf, |
337 | 221f715d | aliguori | BlockDriverCompletionFunc *cb, void *opaque);
|
338 | 7d780669 | aliguori | |
339 | 0f15423c | Anthony Liguori | /* Invalidate any cached metadata used by image formats */
|
340 | 0f15423c | Anthony Liguori | void bdrv_invalidate_cache(BlockDriverState *bs);
|
341 | 0f15423c | Anthony Liguori | void bdrv_invalidate_cache_all(void); |
342 | 0f15423c | Anthony Liguori | |
343 | 07789269 | Benoît Canet | void bdrv_clear_incoming_migration_all(void); |
344 | 07789269 | Benoît Canet | |
345 | faf07963 | pbrook | /* Ensure contents are flushed to disk. */
|
346 | 205ef796 | Kevin Wolf | int bdrv_flush(BlockDriverState *bs);
|
347 | 07f07615 | Paolo Bonzini | int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
|
348 | f0f0fdfe | Kevin Wolf | int bdrv_flush_all(void); |
349 | 2bc93fed | MORITA Kazutaka | void bdrv_close_all(void); |
350 | 922453bc | Stefan Hajnoczi | void bdrv_drain_all(void); |
351 | c6ca28d6 | aliguori | |
352 | bb8bf76f | Christoph Hellwig | int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); |
353 | 4265d620 | Paolo Bonzini | int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors); |
354 | 3ac21627 | Peter Lieven | int bdrv_has_zero_init_1(BlockDriverState *bs);
|
355 | f2feebbd | Kevin Wolf | int bdrv_has_zero_init(BlockDriverState *bs);
|
356 | 4ce78691 | Peter Lieven | bool bdrv_unallocated_blocks_are_zero(BlockDriverState *bs);
|
357 | 4ce78691 | Peter Lieven | bool bdrv_can_write_zeroes_with_unmap(BlockDriverState *bs);
|
358 | b6b8a333 | Paolo Bonzini | int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num, |
359 | b6b8a333 | Paolo Bonzini | int nb_sectors, int *pnum); |
360 | f58c7b35 | ths | int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors, |
361 | 39aa9a12 | Devin Nakamura | int *pnum);
|
362 | b35b2bba | Miroslav Rezanina | int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
|
363 | b35b2bba | Miroslav Rezanina | int64_t sector_num, int nb_sectors, int *pnum); |
364 | faf07963 | pbrook | |
365 | ff06f5f3 | Paolo Bonzini | void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
|
366 | ff06f5f3 | Paolo Bonzini | BlockdevOnError on_write_error); |
367 | 1ceee0d5 | Paolo Bonzini | BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read);
|
368 | 3e1caa5f | Paolo Bonzini | BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error); |
369 | 3e1caa5f | Paolo Bonzini | void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
|
370 | 3e1caa5f | Paolo Bonzini | bool is_read, int error); |
371 | faf07963 | pbrook | int bdrv_is_read_only(BlockDriverState *bs);
|
372 | 985a03b0 | ths | int bdrv_is_sg(BlockDriverState *bs);
|
373 | e900a7b7 | Christoph Hellwig | int bdrv_enable_write_cache(BlockDriverState *bs);
|
374 | 425b0148 | Paolo Bonzini | void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce); |
375 | faf07963 | pbrook | int bdrv_is_inserted(BlockDriverState *bs);
|
376 | faf07963 | pbrook | int bdrv_media_changed(BlockDriverState *bs);
|
377 | 025e849a | Markus Armbruster | void bdrv_lock_medium(BlockDriverState *bs, bool locked); |
378 | f36f3949 | Luiz Capitulino | void bdrv_eject(BlockDriverState *bs, bool eject_flag); |
379 | f8d6bba1 | Markus Armbruster | const char *bdrv_get_format_name(BlockDriverState *bs); |
380 | faf07963 | pbrook | BlockDriverState *bdrv_find(const char *name); |
381 | dc364f4c | Benoît Canet | BlockDriverState *bdrv_find_node(const char *node_name); |
382 | c13163fb | Benoît Canet | BlockDeviceInfoList *bdrv_named_nodes_list(void);
|
383 | 12d3ba82 | Benoît Canet | BlockDriverState *bdrv_lookup_bs(const char *device, |
384 | 12d3ba82 | Benoît Canet | const char *node_name, |
385 | 12d3ba82 | Benoît Canet | Error **errp); |
386 | 2f399b0a | Markus Armbruster | BlockDriverState *bdrv_next(BlockDriverState *bs); |
387 | 51de9760 | aliguori | void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs), |
388 | 51de9760 | aliguori | void *opaque);
|
389 | faf07963 | pbrook | int bdrv_is_encrypted(BlockDriverState *bs);
|
390 | c0f4ce77 | aliguori | int bdrv_key_required(BlockDriverState *bs);
|
391 | faf07963 | pbrook | int bdrv_set_key(BlockDriverState *bs, const char *key); |
392 | c0f4ce77 | aliguori | int bdrv_query_missing_keys(void); |
393 | faf07963 | pbrook | void bdrv_iterate_format(void (*it)(void *opaque, const char *name), |
394 | faf07963 | pbrook | void *opaque);
|
395 | faf07963 | pbrook | const char *bdrv_get_device_name(BlockDriverState *bs); |
396 | c8433287 | Markus Armbruster | int bdrv_get_flags(BlockDriverState *bs);
|
397 | faf07963 | pbrook | int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
|
398 | faf07963 | pbrook | const uint8_t *buf, int nb_sectors); |
399 | faf07963 | pbrook | int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
|
400 | eae041fe | Max Reitz | ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs); |
401 | 343bded4 | Paolo Bonzini | void bdrv_round_to_clusters(BlockDriverState *bs,
|
402 | 343bded4 | Paolo Bonzini | int64_t sector_num, int nb_sectors,
|
403 | 343bded4 | Paolo Bonzini | int64_t *cluster_sector_num, |
404 | 343bded4 | Paolo Bonzini | int *cluster_nb_sectors);
|
405 | faf07963 | pbrook | |
406 | 045df330 | aliguori | const char *bdrv_get_encrypted_filename(BlockDriverState *bs); |
407 | faf07963 | pbrook | void bdrv_get_backing_filename(BlockDriverState *bs,
|
408 | faf07963 | pbrook | char *filename, int filename_size); |
409 | dc5a1371 | Paolo Bonzini | void bdrv_get_full_backing_filename(BlockDriverState *bs,
|
410 | dc5a1371 | Paolo Bonzini | char *dest, size_t sz);
|
411 | 199630b6 | Blue Swirl | int bdrv_is_snapshot(BlockDriverState *bs);
|
412 | faf07963 | pbrook | |
413 | faf07963 | pbrook | int path_is_absolute(const char *path); |
414 | faf07963 | pbrook | void path_combine(char *dest, int dest_size, |
415 | faf07963 | pbrook | const char *base_path, |
416 | faf07963 | pbrook | const char *filename); |
417 | faf07963 | pbrook | |
418 | cf8074b3 | Kevin Wolf | int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
|
419 | 45566e9c | Christoph Hellwig | int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf, |
420 | 45566e9c | Christoph Hellwig | int64_t pos, int size);
|
421 | 178e08a5 | aliguori | |
422 | 45566e9c | Christoph Hellwig | int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
|
423 | 45566e9c | Christoph Hellwig | int64_t pos, int size);
|
424 | 178e08a5 | aliguori | |
425 | d92ada22 | Luiz Capitulino | void bdrv_img_create(const char *filename, const char *fmt, |
426 | d92ada22 | Luiz Capitulino | const char *base_filename, const char *base_fmt, |
427 | f382d43a | Miroslav Rezanina | char *options, uint64_t img_size, int flags, |
428 | f382d43a | Miroslav Rezanina | Error **errp, bool quiet);
|
429 | f88e1a42 | Jes Sorensen | |
430 | 7b6f9300 | Markus Armbruster | void bdrv_set_buffer_alignment(BlockDriverState *bs, int align); |
431 | ba5b7ad4 | Markus Armbruster | void *qemu_blockalign(BlockDriverState *bs, size_t size);
|
432 | c53b1c51 | Stefan Hajnoczi | bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);
|
433 | ba5b7ad4 | Markus Armbruster | |
struct HBitmapIter;
typedef struct BdrvDirtyBitmap BdrvDirtyBitmap;

/* Dirty-bitmap tracking: a BdrvDirtyBitmap records which sectors of
 * @bs have been written since tracking started.
 * NOTE(review): the unit of @granularity (bytes per bit?) is defined
 * in block.c -- confirm before use. */
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs, int granularity);
void bdrv_release_dirty_bitmap(BlockDriverState *bs, BdrvDirtyBitmap *bitmap);

/* Build a QAPI list describing all dirty bitmaps attached to @bs. */
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs);

/* Query/modify dirty state; sector-based addressing. */
int bdrv_get_dirty(BlockDriverState *bs, BdrvDirtyBitmap *bitmap, int64_t sector);
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);

/* Initialise @hbi to iterate over the set bits of @bitmap. */
void bdrv_dirty_iter_init(BlockDriverState *bs,
                          BdrvDirtyBitmap *bitmap, struct HBitmapIter *hbi);
int64_t bdrv_get_dirty_count(BlockDriverState *bs, BdrvDirtyBitmap *bitmap);
445 | 8b9b0cc2 | Kevin Wolf | |
/* Enable/disable copy-on-read for @bs.
 * NOTE(review): whether calls nest (enable twice needs disable twice)
 * is decided in block.c -- confirm there. */
void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

/* Reference counting for BlockDriverState lifetime management. */
void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);

/* Mark/query whether @bs is currently in use by some other component
 * (non-zero @in_use marks it busy). */
void bdrv_set_in_use(BlockDriverState *bs, int in_use);
int bdrv_in_use(BlockDriverState *bs);
453 | 8b9b0cc2 | Kevin Wolf | |
#ifdef CONFIG_LINUX_AIO
/* Return the Linux AIO eventfd/file descriptor backing @bs.
 * NOTE(review): exact meaning of the returned fd is defined by the
 * raw-posix implementation -- confirm there. */
int raw_get_aio_fd(BlockDriverState *bs);
#else
/* Fallback when QEMU is built without Linux AIO support: always
 * report "not supported" using the negated-errno convention. */
static inline int raw_get_aio_fd(BlockDriverState *bs)
{
    return -ENOTSUP;
}
#endif
462 | 4065742a | Stefan Hajnoczi | |
/* I/O accounting categories.  BDRV_MAX_IOTYPE is a count/array-size
 * sentinel, not a real request type. */
enum BlockAcctType {
    BDRV_ACCT_READ,
    BDRV_ACCT_WRITE,
    BDRV_ACCT_FLUSH,
    BDRV_MAX_IOTYPE,
};
469 | a597e79c | Christoph Hellwig | |
/* Per-request accounting state, initialised by bdrv_acct_start() and
 * consumed by bdrv_acct_done(). */
typedef struct BlockAcctCookie {
    int64_t bytes;          /* size of the request being accounted */
    int64_t start_time_ns;  /* request start timestamp, nanoseconds */
    enum BlockAcctType type;
} BlockAcctCookie;
475 | a597e79c | Christoph Hellwig | |
/* Start accounting a request of @bytes bytes of kind @type on @bs;
 * state is recorded in @cookie, which the caller passes back to
 * bdrv_acct_done() when the request completes. */
void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                     int64_t bytes, enum BlockAcctType type);
void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie);
/* Events that the block-debug layer can hook, grouped by the qcow2
 * metadata structure or I/O path they instrument.
 * NOTE(review): the enumerator order presumably must stay in sync
 * with an event-name table in the blkdebug driver (not visible from
 * this header) -- only append new events before BLKDBG_EVENT_MAX and
 * confirm against block/blkdebug.c. */
typedef enum {
    /* qcow2 L1 table */
    BLKDBG_L1_UPDATE,

    BLKDBG_L1_GROW_ALLOC_TABLE,
    BLKDBG_L1_GROW_WRITE_TABLE,
    BLKDBG_L1_GROW_ACTIVATE_TABLE,

    /* qcow2 L2 tables */
    BLKDBG_L2_LOAD,
    BLKDBG_L2_UPDATE,
    BLKDBG_L2_UPDATE_COMPRESSED,
    BLKDBG_L2_ALLOC_COW_READ,
    BLKDBG_L2_ALLOC_WRITE,

    /* guest data reads */
    BLKDBG_READ_AIO,
    BLKDBG_READ_BACKING_AIO,
    BLKDBG_READ_COMPRESSED,

    /* guest data writes */
    BLKDBG_WRITE_AIO,
    BLKDBG_WRITE_COMPRESSED,

    /* VM state save/load through the image */
    BLKDBG_VMSTATE_LOAD,
    BLKDBG_VMSTATE_SAVE,

    /* copy-on-write of backing-file data */
    BLKDBG_COW_READ,
    BLKDBG_COW_WRITE,

    /* qcow2 refcount table */
    BLKDBG_REFTABLE_LOAD,
    BLKDBG_REFTABLE_GROW,
    BLKDBG_REFTABLE_UPDATE,

    /* qcow2 refcount blocks */
    BLKDBG_REFBLOCK_LOAD,
    BLKDBG_REFBLOCK_UPDATE,
    BLKDBG_REFBLOCK_UPDATE_PART,
    BLKDBG_REFBLOCK_ALLOC,
    BLKDBG_REFBLOCK_ALLOC_HOOKUP,
    BLKDBG_REFBLOCK_ALLOC_WRITE,
    BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS,
    BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE,
    BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE,

    /* cluster allocation/free */
    BLKDBG_CLUSTER_ALLOC,
    BLKDBG_CLUSTER_ALLOC_BYTES,
    BLKDBG_CLUSTER_FREE,

    /* flush boundaries */
    BLKDBG_FLUSH_TO_OS,
    BLKDBG_FLUSH_TO_DISK,

    BLKDBG_EVENT_MAX,   /* sentinel: number of events, keep last */
} BlkDebugEvent;
529 | 8b9b0cc2 | Kevin Wolf | |
530 | 8b9b0cc2 | Kevin Wolf | #define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt)
|
531 | 8b9b0cc2 | Kevin Wolf | void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);
|
532 | 8b9b0cc2 | Kevin Wolf | |
/* blkdebug scripting hooks: suspend request processing when the named
 * @event fires, identified by @tag, and resume it later by @tag.
 * NOTE(review): return-value convention (0/-errno?) is defined by the
 * implementation -- confirm in block.c / block/blkdebug.c. */
int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                          const char *tag);
int bdrv_debug_remove_breakpoint(BlockDriverState *bs, const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);
538 | 41c695c7 | Kevin Wolf | |
539 | 8a4bc5aa | Markus Armbruster | #endif |