block/qed.c @ 6f6dc656

/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu-timer.h"
#include "trace.h"
#include "qed.h"
#include "qerror.h"

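/**
 * Cancel an asynchronous request
 *
 * Cancellation is implemented by waiting for the request to complete: the
 * qemu_aio_wait() loop returns once the completion callback has signalled
 * through acb->finished.
 */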
static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static AIOPool qed_aio_pool = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

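/**
 * Check whether an image file looks like a QED image
 *
 * Returns a score of 100 if the QED magic number is present, 0 otherwise.
 * The block layer probes formats by picking the highest-scoring driver.
 */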
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

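/* Header fields are stored little-endian on disk.  The following helpers
 * convert between the on-disk representation and CPU byte order.
 */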
static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

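/**
 * Write the header to the image file synchronously
 *
 * Returns 0 on success or a negative error code from bdrv_pwrite() on
 * failure.
 */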
static int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

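/* State for an asynchronous in-place header update, see qed_write_header() */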
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;
    BlockDriverAIOCB *acb;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    acb = bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                          write_header_cb->nsectors, qed_write_header_cb,
                          write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    BlockDriverAIOCB *acb;
    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    acb = bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                         qed_write_header_read_cb, write_header_cb);
    if (!acb) {
        qed_write_header_cb(write_header_cb, -EIO);
    }
}

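/**
 * Compute the maximum image size for given cluster and table sizes
 *
 * Each table occupies table_size clusters and holds 8-byte offsets, so the
 * L1 table references table_entries L2 tables and each L2 table references
 * table_entries data clusters:
 *
 *   max image size = table_entries * table_entries * cluster_size
 */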
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

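/**
 * Plug the allocating write request queue
 *
 * While plugged, no new allocating write request is started; requests queue
 * up on s->allocating_write_reqs until qed_unplug_allocating_write_reqs().
 */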
static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

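/**
 * Unplug the allocating write request queue and kick off the next request
 */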
static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

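/**
 * Clear the QED_F_NEED_CHECK bit once the image has been idle
 *
 * Fires QED_NEED_CHECK_TIMEOUT seconds after the last allocating write
 * request drains.  Allocating writes are plugged while the flag is flushed
 * and cleared so the image stays consistent.
 */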
static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use vm_clock so we don't alter the image file while suspended for
     * migration.
     */
    qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    qemu_del_timer(s->need_check_timer);
}

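/**
 * Open a QED image
 *
 * Validates the header, reads the backing filename, knocks out unknown
 * autoclear feature bits, loads the L1 table, and repairs the image if it
 * was not closed cleanly.
 */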
static int bdrv_qed_open(BlockDriverState *bs, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    ret = 0; /* ret should always be 0 or -errno */
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
                 s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                      bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (s->header.features & QED_F_NEED_CHECK) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
            if (!result.corruptions && !result.check_errors) {
                /* Ensure fixes reach storage before clearing check bit */
                bdrv_flush(s->bs);

                s->header.features &= ~QED_F_NEED_CHECK;
                qed_write_header_sync(s);
            }
        }
    }

    s->need_check_timer = qemu_new_timer_ns(vm_clock,
                                            qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    qemu_free_timer(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

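/**
 * Create a new QED image file
 *
 * @filename:       Path to the new image file
 * @cluster_size:   Cluster size in bytes
 * @image_size:     Logical image size in bytes
 * @table_size:     Table size in clusters
 * @backing_file:   Backing filename, may be NULL
 * @backing_fmt:    Backing file format, may be NULL
 */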
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_delete(bs);
    return ret;
}

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] and power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] and power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}

typedef struct {
    int is_allocated;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    cb->is_allocated = (ret == QED_CLUSTER_FOUND || ret == QED_CLUSTER_ZERO);
}

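/**
 * Check whether a range of sectors is allocated in the image
 *
 * Runs qed_find_cluster() and waits synchronously for its callback.  *pnum
 * is set to the number of contiguous sectors that share the same allocation
 * status.
 */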
static int bdrv_qed_is_allocated(BlockDriverState *bs, int64_t sector_num,
                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .is_allocated = -1,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);

    while (cb.is_allocated == -1) {
        qemu_aio_wait();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.is_allocated;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    BlockDriverAIOCB *aiocb;
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING);
    aiocb = bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                           qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
    if (!aiocb) {
        cb(opaque, -EIO);
    }
}

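/* State for a copy-on-write operation from the backing file into the image */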
typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;
    BlockDriverAIOCB *aiocb;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    aiocb = bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                            &copy_cb->qiov,
                            copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                            qed_copy_from_backing_file_cb, copy_cb);
    if (!aiocb) {
        qed_copy_from_backing_file_cb(copy_cb, -EIO);
    }
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:          QED state
 * @table:      L2 table
 * @index:      First cluster index
 * @n:          Number of contiguous clusters
 * @cluster:    First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

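/**
 * Bottom half that invokes the user completion callback
 *
 * Also signals qed_aio_cancel() through acb->finished if the request was
 * being cancelled.
 */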
static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

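/**
 * Complete a request and schedule its callback
 *
 * Frees per-request resources, schedules a bottom half to invoke the
 * completion callback, and starts the next queued allocating write request
 * (or the need-check timer if the queue drained).
 */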
869 | eabba580 | Stefan Hajnoczi | static void qed_aio_complete(QEDAIOCB *acb, int ret) |
870 | eabba580 | Stefan Hajnoczi | { |
871 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
872 | eabba580 | Stefan Hajnoczi | |
873 | eabba580 | Stefan Hajnoczi | trace_qed_aio_complete(s, acb, ret); |
874 | eabba580 | Stefan Hajnoczi | |
875 | eabba580 | Stefan Hajnoczi | /* Free resources */
|
876 | eabba580 | Stefan Hajnoczi | qemu_iovec_destroy(&acb->cur_qiov); |
877 | eabba580 | Stefan Hajnoczi | qed_unref_l2_cache_entry(acb->request.l2_table); |
878 | eabba580 | Stefan Hajnoczi | |
879 | eabba580 | Stefan Hajnoczi | /* Arrange for a bh to invoke the completion function */
|
880 | eabba580 | Stefan Hajnoczi | acb->bh_ret = ret; |
881 | eabba580 | Stefan Hajnoczi | acb->bh = qemu_bh_new(qed_aio_complete_bh, acb); |
882 | eabba580 | Stefan Hajnoczi | qemu_bh_schedule(acb->bh); |
883 | eabba580 | Stefan Hajnoczi | |
884 | eabba580 | Stefan Hajnoczi | /* Start next allocating write request waiting behind this one. Note that
|
885 | eabba580 | Stefan Hajnoczi | * requests enqueue themselves when they first hit an unallocated cluster
|
886 | eabba580 | Stefan Hajnoczi | * but they wait until the entire request is finished before waking up the
|
887 | eabba580 | Stefan Hajnoczi | * next request in the queue. This ensures that we don't cycle through
|
888 | eabba580 | Stefan Hajnoczi | * requests multiple times but rather finish one at a time completely.
|
889 | eabba580 | Stefan Hajnoczi | */
|
890 | eabba580 | Stefan Hajnoczi | if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
|
891 | eabba580 | Stefan Hajnoczi | QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next); |
892 | eabba580 | Stefan Hajnoczi | acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs); |
893 | eabba580 | Stefan Hajnoczi | if (acb) {
|
894 | eabba580 | Stefan Hajnoczi | qed_aio_next_io(acb, 0);
|
895 | 6f321e93 | Stefan Hajnoczi | } else if (s->header.features & QED_F_NEED_CHECK) { |
896 | 6f321e93 | Stefan Hajnoczi | qed_start_need_check_timer(s); |
897 | eabba580 | Stefan Hajnoczi | } |
898 | eabba580 | Stefan Hajnoczi | } |
899 | eabba580 | Stefan Hajnoczi | } |
900 | eabba580 | Stefan Hajnoczi | |
901 | eabba580 | Stefan Hajnoczi | /**
|
902 | eabba580 | Stefan Hajnoczi | * Commit the current L2 table to the cache
|
903 | eabba580 | Stefan Hajnoczi | */
|
904 | eabba580 | Stefan Hajnoczi | static void qed_commit_l2_update(void *opaque, int ret) |
905 | eabba580 | Stefan Hajnoczi | { |
906 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
907 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
908 | eabba580 | Stefan Hajnoczi | CachedL2Table *l2_table = acb->request.l2_table; |
909 | e4fc8781 | Stefan Hajnoczi | uint64_t l2_offset = l2_table->offset; |
910 | eabba580 | Stefan Hajnoczi | |
911 | eabba580 | Stefan Hajnoczi | qed_commit_l2_cache_entry(&s->l2_cache, l2_table); |
912 | eabba580 | Stefan Hajnoczi | |
913 | eabba580 | Stefan Hajnoczi | /* This is guaranteed to succeed because we just committed the entry to the
|
914 | eabba580 | Stefan Hajnoczi | * cache.
|
915 | eabba580 | Stefan Hajnoczi | */
|
916 | e4fc8781 | Stefan Hajnoczi | acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset); |
917 | eabba580 | Stefan Hajnoczi | assert(acb->request.l2_table != NULL);
|
918 | eabba580 | Stefan Hajnoczi | |
919 | eabba580 | Stefan Hajnoczi | qed_aio_next_io(opaque, ret); |
920 | eabba580 | Stefan Hajnoczi | } |
921 | eabba580 | Stefan Hajnoczi | |
922 | eabba580 | Stefan Hajnoczi | /** |
923 | eabba580 | Stefan Hajnoczi | * Update L1 table with new L2 table offset and write it out |
924 | eabba580 | Stefan Hajnoczi | */ |
925 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_l1_update(void *opaque, int ret) |
926 | eabba580 | Stefan Hajnoczi | { |
927 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
928 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
929 | eabba580 | Stefan Hajnoczi | int index; |
930 | eabba580 | Stefan Hajnoczi | |
931 | eabba580 | Stefan Hajnoczi | if (ret) { |
932 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, ret); |
933 | eabba580 | Stefan Hajnoczi | return; |
934 | eabba580 | Stefan Hajnoczi | } |
935 | eabba580 | Stefan Hajnoczi | |
936 | eabba580 | Stefan Hajnoczi | index = qed_l1_index(s, acb->cur_pos); |
937 | eabba580 | Stefan Hajnoczi | s->l1_table->offsets[index] = acb->request.l2_table->offset; |
938 | eabba580 | Stefan Hajnoczi | |
939 | eabba580 | Stefan Hajnoczi | qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb); |
940 | eabba580 | Stefan Hajnoczi | } |
941 | eabba580 | Stefan Hajnoczi | |
942 | eabba580 | Stefan Hajnoczi | /** |
943 | eabba580 | Stefan Hajnoczi | * Update L2 table with new cluster offsets and write them out |
944 | eabba580 | Stefan Hajnoczi | */ |
945 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_l2_update(void *opaque, int ret) |
946 | eabba580 | Stefan Hajnoczi | { |
947 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
948 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
949 | eabba580 | Stefan Hajnoczi | bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1; |
950 | eabba580 | Stefan Hajnoczi | int index; |
951 | eabba580 | Stefan Hajnoczi | |
952 | eabba580 | Stefan Hajnoczi | if (ret) { |
953 | eabba580 | Stefan Hajnoczi | goto err; |
954 | eabba580 | Stefan Hajnoczi | } |
955 | eabba580 | Stefan Hajnoczi | |
956 | eabba580 | Stefan Hajnoczi | if (need_alloc) { |
957 | eabba580 | Stefan Hajnoczi | qed_unref_l2_cache_entry(acb->request.l2_table); |
958 | eabba580 | Stefan Hajnoczi | acb->request.l2_table = qed_new_l2_table(s); |
959 | eabba580 | Stefan Hajnoczi | } |
960 | eabba580 | Stefan Hajnoczi | |
961 | eabba580 | Stefan Hajnoczi | index = qed_l2_index(s, acb->cur_pos); |
962 | eabba580 | Stefan Hajnoczi | qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters, |
963 | eabba580 | Stefan Hajnoczi | acb->cur_cluster); |
964 | eabba580 | Stefan Hajnoczi | |
965 | eabba580 | Stefan Hajnoczi | if (need_alloc) { |
966 | eabba580 | Stefan Hajnoczi | /* Write out the whole new L2 table */ |
967 | eabba580 | Stefan Hajnoczi | qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true, |
968 | eabba580 | Stefan Hajnoczi | qed_aio_write_l1_update, acb); |
969 | eabba580 | Stefan Hajnoczi | } else { |
970 | eabba580 | Stefan Hajnoczi | /* Write out only the updated part of the L2 table */ |
971 | eabba580 | Stefan Hajnoczi | qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false, |
972 | eabba580 | Stefan Hajnoczi | qed_aio_next_io, acb); |
973 | eabba580 | Stefan Hajnoczi | } |
974 | eabba580 | Stefan Hajnoczi | return; |
975 | eabba580 | Stefan Hajnoczi | |
976 | eabba580 | Stefan Hajnoczi | err: |
977 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, ret); |
978 | eabba580 | Stefan Hajnoczi | } |
979 | eabba580 | Stefan Hajnoczi | |
980 | eabba580 | Stefan Hajnoczi | /** |
981 | eabba580 | Stefan Hajnoczi | * Flush new data clusters before updating the L2 table |
982 | eabba580 | Stefan Hajnoczi | * |
983 | eabba580 | Stefan Hajnoczi | * This flush is necessary when a backing file is in use. A crash during an |
984 | eabba580 | Stefan Hajnoczi | * allocating write could result in empty clusters in the image. If the write |
985 | eabba580 | Stefan Hajnoczi | * only touched a subregion of the cluster, then backing image sectors have |
986 | eabba580 | Stefan Hajnoczi | * been lost in the untouched region. The solution is to flush after writing a |
987 | eabba580 | Stefan Hajnoczi | * new data cluster and before updating the L2 table. |
988 | eabba580 | Stefan Hajnoczi | */ |
989 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_flush_before_l2_update(void *opaque, int ret) |
990 | eabba580 | Stefan Hajnoczi | { |
991 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
992 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
993 | eabba580 | Stefan Hajnoczi | |
994 | eabba580 | Stefan Hajnoczi | if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update, opaque)) { |
995 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, -EIO); |
996 | eabba580 | Stefan Hajnoczi | } |
997 | eabba580 | Stefan Hajnoczi | } |
998 | eabba580 | Stefan Hajnoczi | |
999 | eabba580 | Stefan Hajnoczi | /** |
1000 | eabba580 | Stefan Hajnoczi | * Write data to the image file |
1001 | eabba580 | Stefan Hajnoczi | */ |
1002 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_main(void *opaque, int ret) |
1003 | eabba580 | Stefan Hajnoczi | { |
1004 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
1005 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
1006 | eabba580 | Stefan Hajnoczi | uint64_t offset = acb->cur_cluster + |
1007 | eabba580 | Stefan Hajnoczi | qed_offset_into_cluster(s, acb->cur_pos); |
1008 | eabba580 | Stefan Hajnoczi | BlockDriverCompletionFunc *next_fn; |
1009 | eabba580 | Stefan Hajnoczi | BlockDriverAIOCB *file_acb; |
1010 | eabba580 | Stefan Hajnoczi | |
1011 | eabba580 | Stefan Hajnoczi | trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size); |
1012 | eabba580 | Stefan Hajnoczi | |
1013 | eabba580 | Stefan Hajnoczi | if (ret) { |
1014 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, ret); |
1015 | eabba580 | Stefan Hajnoczi | return; |
1016 | eabba580 | Stefan Hajnoczi | } |
1017 | eabba580 | Stefan Hajnoczi | |
1018 | eabba580 | Stefan Hajnoczi | if (acb->find_cluster_ret == QED_CLUSTER_FOUND) { |
1019 | eabba580 | Stefan Hajnoczi | next_fn = qed_aio_next_io; |
1020 | eabba580 | Stefan Hajnoczi | } else { |
1021 | eabba580 | Stefan Hajnoczi | if (s->bs->backing_hd) { |
1022 | eabba580 | Stefan Hajnoczi | next_fn = qed_aio_write_flush_before_l2_update; |
1023 | eabba580 | Stefan Hajnoczi | } else { |
1024 | eabba580 | Stefan Hajnoczi | next_fn = qed_aio_write_l2_update; |
1025 | eabba580 | Stefan Hajnoczi | } |
1026 | eabba580 | Stefan Hajnoczi | } |
1027 | eabba580 | Stefan Hajnoczi | |
1028 | eabba580 | Stefan Hajnoczi | BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO); |
1029 | eabba580 | Stefan Hajnoczi | file_acb = bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE, |
1030 | eabba580 | Stefan Hajnoczi | &acb->cur_qiov, |
1031 | eabba580 | Stefan Hajnoczi | acb->cur_qiov.size / BDRV_SECTOR_SIZE, |
1032 | eabba580 | Stefan Hajnoczi | next_fn, acb); |
1033 | eabba580 | Stefan Hajnoczi | if (!file_acb) { |
1034 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, -EIO); |
1035 | eabba580 | Stefan Hajnoczi | } |
1036 | eabba580 | Stefan Hajnoczi | } |
1037 | eabba580 | Stefan Hajnoczi | |
1038 | eabba580 | Stefan Hajnoczi | /** |
1039 | eabba580 | Stefan Hajnoczi | * Populate the untouched region at the back of a new data cluster |
1040 | eabba580 | Stefan Hajnoczi | */ |
1041 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_postfill(void *opaque, int ret) |
1042 | eabba580 | Stefan Hajnoczi | { |
1043 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
1044 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
1045 | eabba580 | Stefan Hajnoczi | uint64_t start = acb->cur_pos + acb->cur_qiov.size; |
1046 | eabba580 | Stefan Hajnoczi | uint64_t len = |
1047 | eabba580 | Stefan Hajnoczi | qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start; |
1048 | eabba580 | Stefan Hajnoczi | uint64_t offset = acb->cur_cluster + |
1049 | eabba580 | Stefan Hajnoczi | qed_offset_into_cluster(s, acb->cur_pos) + |
1050 | eabba580 | Stefan Hajnoczi | acb->cur_qiov.size; |
1051 | eabba580 | Stefan Hajnoczi | |
1052 | eabba580 | Stefan Hajnoczi | if (ret) { |
1053 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, ret); |
1054 | eabba580 | Stefan Hajnoczi | return; |
1055 | eabba580 | Stefan Hajnoczi | } |
1056 | eabba580 | Stefan Hajnoczi | |
1057 | eabba580 | Stefan Hajnoczi | trace_qed_aio_write_postfill(s, acb, start, len, offset); |
1058 | eabba580 | Stefan Hajnoczi | qed_copy_from_backing_file(s, start, len, offset, |
1059 | eabba580 | Stefan Hajnoczi | qed_aio_write_main, acb); |
1060 | eabba580 | Stefan Hajnoczi | } |
1061 | eabba580 | Stefan Hajnoczi | |
1062 | eabba580 | Stefan Hajnoczi | /** |
1063 | eabba580 | Stefan Hajnoczi | * Populate the untouched region at the front of a new data cluster |
1064 | eabba580 | Stefan Hajnoczi | */ |
1065 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_prefill(void *opaque, int ret) |
1066 | eabba580 | Stefan Hajnoczi | { |
1067 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
1068 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
1069 | eabba580 | Stefan Hajnoczi | uint64_t start = qed_start_of_cluster(s, acb->cur_pos); |
1070 | eabba580 | Stefan Hajnoczi | uint64_t len = qed_offset_into_cluster(s, acb->cur_pos); |
1071 | eabba580 | Stefan Hajnoczi | |
1072 | eabba580 | Stefan Hajnoczi | trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster); |
1073 | eabba580 | Stefan Hajnoczi | qed_copy_from_backing_file(s, start, len, acb->cur_cluster, |
1074 | eabba580 | Stefan Hajnoczi | qed_aio_write_postfill, acb); |
1075 | eabba580 | Stefan Hajnoczi | } |
1076 | eabba580 | Stefan Hajnoczi | |
1077 | eabba580 | Stefan Hajnoczi | /** |
1078 | 0d09c797 | Stefan Hajnoczi | * Check if the QED_F_NEED_CHECK bit should be set during an allocating write |
1079 | 0d09c797 | Stefan Hajnoczi | */ |
1080 | 0d09c797 | Stefan Hajnoczi | static bool qed_should_set_need_check(BDRVQEDState *s) |
1081 | 0d09c797 | Stefan Hajnoczi | { |
1082 | 0d09c797 | Stefan Hajnoczi | /* The flush before L2 update path ensures consistency */ |
1083 | 0d09c797 | Stefan Hajnoczi | if (s->bs->backing_hd) { |
1084 | 0d09c797 | Stefan Hajnoczi | return false; |
1085 | 0d09c797 | Stefan Hajnoczi | } |
1086 | 0d09c797 | Stefan Hajnoczi | |
1087 | 0d09c797 | Stefan Hajnoczi | return !(s->header.features & QED_F_NEED_CHECK); |
1088 | 0d09c797 | Stefan Hajnoczi | } |
1089 | 0d09c797 | Stefan Hajnoczi | |
1090 | 0d09c797 | Stefan Hajnoczi | /** |
1091 | eabba580 | Stefan Hajnoczi | * Write new data cluster |
1092 | eabba580 | Stefan Hajnoczi | * |
1093 | eabba580 | Stefan Hajnoczi | * @acb: Write request |
1094 | eabba580 | Stefan Hajnoczi | * @len: Length in bytes |
1095 | eabba580 | Stefan Hajnoczi | * |
1096 | eabba580 | Stefan Hajnoczi | * This path is taken when writing to previously unallocated clusters. |
1097 | eabba580 | Stefan Hajnoczi | */ |
1098 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len) |
1099 | eabba580 | Stefan Hajnoczi | { |
1100 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
1101 | eabba580 | Stefan Hajnoczi | |
1102 | 6f321e93 | Stefan Hajnoczi | /* Cancel timer when the first allocating request comes in */ |
1103 | 6f321e93 | Stefan Hajnoczi | if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) { |
1104 | 6f321e93 | Stefan Hajnoczi | qed_cancel_need_check_timer(s); |
1105 | 6f321e93 | Stefan Hajnoczi | } |
1106 | 6f321e93 | Stefan Hajnoczi | |
1107 | eabba580 | Stefan Hajnoczi | /* Freeze this request if another allocating write is in progress */ |
1108 | eabba580 | Stefan Hajnoczi | if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) { |
1109 | eabba580 | Stefan Hajnoczi | QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next); |
1110 | eabba580 | Stefan Hajnoczi | } |
1111 | 6f321e93 | Stefan Hajnoczi | if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) || |
1112 | 6f321e93 | Stefan Hajnoczi | s->allocating_write_reqs_plugged) { |
1113 | eabba580 | Stefan Hajnoczi | return; /* wait for existing request to finish */ |
1115 | eabba580 | Stefan Hajnoczi | |
1116 | eabba580 | Stefan Hajnoczi | acb->cur_nclusters = qed_bytes_to_clusters(s, |
1117 | eabba580 | Stefan Hajnoczi | qed_offset_into_cluster(s, acb->cur_pos) + len); |
1118 | eabba580 | Stefan Hajnoczi | acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters); |
1119 | eabba580 | Stefan Hajnoczi | qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); |
1120 | eabba580 | Stefan Hajnoczi | |
1121 | 0d09c797 | Stefan Hajnoczi | if (qed_should_set_need_check(s)) {
|
1122 | 0d09c797 | Stefan Hajnoczi | s->header.features |= QED_F_NEED_CHECK; |
1123 | 0d09c797 | Stefan Hajnoczi | qed_write_header(s, qed_aio_write_prefill, acb); |
1124 | 0d09c797 | Stefan Hajnoczi | } else {
|
1125 | 01979a98 | Stefan Hajnoczi | qed_aio_write_prefill(acb, 0);
|
1126 | 01979a98 | Stefan Hajnoczi | } |
1127 | eabba580 | Stefan Hajnoczi | } |
1128 | eabba580 | Stefan Hajnoczi | |
1129 | eabba580 | Stefan Hajnoczi | /**
|
1130 | eabba580 | Stefan Hajnoczi | * Write data cluster in place
|
1131 | eabba580 | Stefan Hajnoczi | *
|
1132 | eabba580 | Stefan Hajnoczi | * @acb: Write request
|
1133 | eabba580 | Stefan Hajnoczi | * @offset: Cluster offset in bytes
|
1134 | eabba580 | Stefan Hajnoczi | * @len: Length in bytes
|
1135 | eabba580 | Stefan Hajnoczi | *
|
1136 | eabba580 | Stefan Hajnoczi | * This path is taken when writing to already allocated clusters.
|
1137 | eabba580 | Stefan Hajnoczi | */
|
1138 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len) |
1139 | eabba580 | Stefan Hajnoczi | { |
1140 | eabba580 | Stefan Hajnoczi | /* Calculate the I/O vector */ |
1141 | eabba580 | Stefan Hajnoczi | acb->cur_cluster = offset; |
1142 | eabba580 | Stefan Hajnoczi | qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); |
1143 | eabba580 | Stefan Hajnoczi | |
1144 | eabba580 | Stefan Hajnoczi | /* Do the actual write */ |
1145 | eabba580 | Stefan Hajnoczi | qed_aio_write_main(acb, 0); |
1146 | eabba580 | Stefan Hajnoczi | } |
1147 | eabba580 | Stefan Hajnoczi | |
1148 | eabba580 | Stefan Hajnoczi | /** |
1149 | eabba580 | Stefan Hajnoczi | * Write data cluster |
1150 | eabba580 | Stefan Hajnoczi | * |
1151 | eabba580 | Stefan Hajnoczi | * @opaque: Write request |
1152 | eabba580 | Stefan Hajnoczi | * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1, |
1153 | eabba580 | Stefan Hajnoczi | * QED_CLUSTER_ZERO, or -errno |
1154 | eabba580 | Stefan Hajnoczi | * @offset: Cluster offset in bytes |
1155 | eabba580 | Stefan Hajnoczi | * @len: Length in bytes |
1156 | eabba580 | Stefan Hajnoczi | * |
1157 | eabba580 | Stefan Hajnoczi | * Callback from qed_find_cluster(). |
1158 | eabba580 | Stefan Hajnoczi | */ |
1159 | eabba580 | Stefan Hajnoczi | static void qed_aio_write_data(void *opaque, int ret, |
1160 | eabba580 | Stefan Hajnoczi | uint64_t offset, size_t len) |
1161 | eabba580 | Stefan Hajnoczi | { |
1162 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
1163 | eabba580 | Stefan Hajnoczi | |
1164 | eabba580 | Stefan Hajnoczi | trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len); |
1165 | eabba580 | Stefan Hajnoczi | |
1166 | eabba580 | Stefan Hajnoczi | acb->find_cluster_ret = ret; |
1167 | eabba580 | Stefan Hajnoczi | |
1168 | eabba580 | Stefan Hajnoczi | switch (ret) { |
1169 | eabba580 | Stefan Hajnoczi | case QED_CLUSTER_FOUND: |
1170 | eabba580 | Stefan Hajnoczi | qed_aio_write_inplace(acb, offset, len); |
1171 | eabba580 | Stefan Hajnoczi | break; |
1172 | eabba580 | Stefan Hajnoczi | |
1173 | eabba580 | Stefan Hajnoczi | case QED_CLUSTER_L2: |
1174 | eabba580 | Stefan Hajnoczi | case QED_CLUSTER_L1: |
1175 | 21df65b6 | Anthony Liguori | case QED_CLUSTER_ZERO: |
1176 | eabba580 | Stefan Hajnoczi | qed_aio_write_alloc(acb, len); |
1177 | eabba580 | Stefan Hajnoczi | break; |
1178 | eabba580 | Stefan Hajnoczi | |
1179 | eabba580 | Stefan Hajnoczi | default: |
1180 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, ret); |
1181 | eabba580 | Stefan Hajnoczi | break; |
1183 | eabba580 | Stefan Hajnoczi | } |
1184 | eabba580 | Stefan Hajnoczi | |
1185 | eabba580 | Stefan Hajnoczi | /** |
1186 | eabba580 | Stefan Hajnoczi | * Read data cluster |
1187 | eabba580 | Stefan Hajnoczi | * |
1188 | eabba580 | Stefan Hajnoczi | * @opaque: Read request |
1189 | eabba580 | Stefan Hajnoczi | * @ret: QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1, |
1190 | eabba580 | Stefan Hajnoczi | * QED_CLUSTER_ZERO, or -errno |
1191 | eabba580 | Stefan Hajnoczi | * @offset: Cluster offset in bytes |
1192 | eabba580 | Stefan Hajnoczi | * @len: Length in bytes |
1193 | eabba580 | Stefan Hajnoczi | * |
1194 | eabba580 | Stefan Hajnoczi | * Callback from qed_find_cluster(). |
1195 | eabba580 | Stefan Hajnoczi | */ |
1196 | eabba580 | Stefan Hajnoczi | static void qed_aio_read_data(void *opaque, int ret, |
1197 | eabba580 | Stefan Hajnoczi | uint64_t offset, size_t len) |
1198 | eabba580 | Stefan Hajnoczi | { |
1199 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
1200 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
1201 | eabba580 | Stefan Hajnoczi | BlockDriverState *bs = acb->common.bs; |
1202 | eabba580 | Stefan Hajnoczi | BlockDriverAIOCB *file_acb; |
1203 | eabba580 | Stefan Hajnoczi | |
1204 | eabba580 | Stefan Hajnoczi | /* Adjust offset into cluster */ |
1205 | eabba580 | Stefan Hajnoczi | offset += qed_offset_into_cluster(s, acb->cur_pos); |
1206 | eabba580 | Stefan Hajnoczi | |
1207 | eabba580 | Stefan Hajnoczi | trace_qed_aio_read_data(s, acb, ret, offset, len); |
1208 | eabba580 | Stefan Hajnoczi | |
1209 | eabba580 | Stefan Hajnoczi | if (ret < 0) { |
1210 | eabba580 | Stefan Hajnoczi | goto err; |
1211 | eabba580 | Stefan Hajnoczi | } |
1212 | eabba580 | Stefan Hajnoczi | |
1213 | eabba580 | Stefan Hajnoczi | qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len); |
1214 | eabba580 | Stefan Hajnoczi | |
1215 | 21df65b6 | Anthony Liguori | /* Handle zero cluster and backing file reads */ |
1216 | 21df65b6 | Anthony Liguori | if (ret == QED_CLUSTER_ZERO) { |
1217 | 21df65b6 | Anthony Liguori | qemu_iovec_memset(&acb->cur_qiov, 0, acb->cur_qiov.size); |
1218 | 21df65b6 | Anthony Liguori | qed_aio_next_io(acb, 0); |
1219 | 21df65b6 | Anthony Liguori | return; |
1220 | 21df65b6 | Anthony Liguori | } else if (ret != QED_CLUSTER_FOUND) { |
1221 | eabba580 | Stefan Hajnoczi | qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov, |
1222 | eabba580 | Stefan Hajnoczi | qed_aio_next_io, acb); |
1223 | eabba580 | Stefan Hajnoczi | return; |
1224 | eabba580 | Stefan Hajnoczi | } |
1225 | eabba580 | Stefan Hajnoczi | |
1226 | eabba580 | Stefan Hajnoczi | BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO); |
1227 | eabba580 | Stefan Hajnoczi | file_acb = bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE, |
1228 | eabba580 | Stefan Hajnoczi | &acb->cur_qiov, |
1229 | eabba580 | Stefan Hajnoczi | acb->cur_qiov.size / BDRV_SECTOR_SIZE, |
1230 | eabba580 | Stefan Hajnoczi | qed_aio_next_io, acb); |
1231 | eabba580 | Stefan Hajnoczi | if (!file_acb) { |
1232 | eabba580 | Stefan Hajnoczi | ret = -EIO; |
1233 | eabba580 | Stefan Hajnoczi | goto err; |
1234 | eabba580 | Stefan Hajnoczi | } |
1235 | eabba580 | Stefan Hajnoczi | return; |
1236 | eabba580 | Stefan Hajnoczi | |
1237 | eabba580 | Stefan Hajnoczi | err: |
1238 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, ret); |
1239 | eabba580 | Stefan Hajnoczi | } |
1240 | eabba580 | Stefan Hajnoczi | |
1241 | eabba580 | Stefan Hajnoczi | /** |
1242 | eabba580 | Stefan Hajnoczi | * Begin next I/O or complete the request |
1243 | eabba580 | Stefan Hajnoczi | */ |
1244 | eabba580 | Stefan Hajnoczi | static void qed_aio_next_io(void *opaque, int ret) |
1245 | eabba580 | Stefan Hajnoczi | { |
1246 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = opaque; |
1247 | eabba580 | Stefan Hajnoczi | BDRVQEDState *s = acb_to_s(acb); |
1248 | eabba580 | Stefan Hajnoczi | QEDFindClusterFunc *io_fn = |
1249 | eabba580 | Stefan Hajnoczi | acb->is_write ? qed_aio_write_data : qed_aio_read_data; |
1250 | eabba580 | Stefan Hajnoczi | |
1251 | eabba580 | Stefan Hajnoczi | trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size); |
1252 | eabba580 | Stefan Hajnoczi | |
1253 | eabba580 | Stefan Hajnoczi | /* Handle I/O error */ |
1254 | eabba580 | Stefan Hajnoczi | if (ret) { |
1255 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, ret); |
1256 | eabba580 | Stefan Hajnoczi | return; |
1257 | eabba580 | Stefan Hajnoczi | } |
1258 | eabba580 | Stefan Hajnoczi | |
1259 | eabba580 | Stefan Hajnoczi | acb->qiov_offset += acb->cur_qiov.size; |
1260 | eabba580 | Stefan Hajnoczi | acb->cur_pos += acb->cur_qiov.size; |
1261 | eabba580 | Stefan Hajnoczi | qemu_iovec_reset(&acb->cur_qiov); |
1262 | eabba580 | Stefan Hajnoczi | |
1263 | eabba580 | Stefan Hajnoczi | /* Complete request */ |
1264 | eabba580 | Stefan Hajnoczi | if (acb->cur_pos >= acb->end_pos) { |
1265 | eabba580 | Stefan Hajnoczi | qed_aio_complete(acb, 0); |
1266 | eabba580 | Stefan Hajnoczi | return; |
1267 | eabba580 | Stefan Hajnoczi | } |
1268 | eabba580 | Stefan Hajnoczi | |
1269 | eabba580 | Stefan Hajnoczi | /* Find next cluster and start I/O */ |
1270 | eabba580 | Stefan Hajnoczi | qed_find_cluster(s, &acb->request, |
1271 | eabba580 | Stefan Hajnoczi | acb->cur_pos, acb->end_pos - acb->cur_pos, |
1272 | eabba580 | Stefan Hajnoczi | io_fn, acb); |
1273 | eabba580 | Stefan Hajnoczi | } |
1274 | eabba580 | Stefan Hajnoczi | |
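| | | /** |
| | |  * Allocate a request from the AIO pool, initialize its state, and start |
| | |  * the first I/O |
| | |  * |
| | |  * @is_write: True for write requests, false for read requests |
| | |  */ |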
1275 | eabba580 | Stefan Hajnoczi | static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs, |
1276 | eabba580 | Stefan Hajnoczi | int64_t sector_num, |
1277 | eabba580 | Stefan Hajnoczi | QEMUIOVector *qiov, int nb_sectors, |
1278 | eabba580 | Stefan Hajnoczi | BlockDriverCompletionFunc *cb, |
1279 | eabba580 | Stefan Hajnoczi | void *opaque, bool is_write) |
1280 | eabba580 | Stefan Hajnoczi | { |
1281 | eabba580 | Stefan Hajnoczi | QEDAIOCB *acb = qemu_aio_get(&qed_aio_pool, bs, cb, opaque); |
1282 | eabba580 | Stefan Hajnoczi | |
1283 | eabba580 | Stefan Hajnoczi | trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors, |
1284 | eabba580 | Stefan Hajnoczi | opaque, is_write); |
1285 | eabba580 | Stefan Hajnoczi | |
1286 | eabba580 | Stefan Hajnoczi | acb->is_write = is_write; |
1287 | eabba580 | Stefan Hajnoczi | acb->finished = NULL; |
1288 | eabba580 | Stefan Hajnoczi | acb->qiov = qiov; |
1289 | eabba580 | Stefan Hajnoczi | acb->qiov_offset = 0; |
1290 | eabba580 | Stefan Hajnoczi | acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE; |
1291 | eabba580 | Stefan Hajnoczi | acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE; |
1292 | eabba580 | Stefan Hajnoczi | acb->request.l2_table = NULL; |
1293 | eabba580 | Stefan Hajnoczi | qemu_iovec_init(&acb->cur_qiov, qiov->niov); |
1294 | eabba580 | Stefan Hajnoczi | |
1295 | eabba580 | Stefan Hajnoczi | /* Start request */ |
1296 | eabba580 | Stefan Hajnoczi | qed_aio_next_io(acb, 0); |
1297 | eabba580 | Stefan Hajnoczi | return &acb->common; |
1298 | eabba580 | Stefan Hajnoczi | } |
1299 | eabba580 | Stefan Hajnoczi | |
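| | | /* Start an asynchronous read of guest sectors */ |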
1300 | 75411d23 | Stefan Hajnoczi | static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs, |
1301 | 75411d23 | Stefan Hajnoczi | int64_t sector_num, |
1302 | 75411d23 | Stefan Hajnoczi | QEMUIOVector *qiov, int nb_sectors, |
1303 | 75411d23 | Stefan Hajnoczi | BlockDriverCompletionFunc *cb, |
1304 | 75411d23 | Stefan Hajnoczi | void *opaque) |
1305 | 75411d23 | Stefan Hajnoczi | { |
1306 | eabba580 | Stefan Hajnoczi | return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, false); |
1307 | 75411d23 | Stefan Hajnoczi | } |
1308 | 75411d23 | Stefan Hajnoczi | |
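| | | /* Start an asynchronous write of guest sectors */ |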
1309 | 75411d23 | Stefan Hajnoczi | static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs, |
1310 | 75411d23 | Stefan Hajnoczi | int64_t sector_num, |
1311 | 75411d23 | Stefan Hajnoczi | QEMUIOVector *qiov, int nb_sectors, |
1312 | 75411d23 | Stefan Hajnoczi | BlockDriverCompletionFunc *cb, |
1313 | 75411d23 | Stefan Hajnoczi | void *opaque) |
1314 | 75411d23 | Stefan Hajnoczi | { |
1315 | eabba580 | Stefan Hajnoczi | return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, true); |
1316 | 75411d23 | Stefan Hajnoczi | } |
1317 | 75411d23 | Stefan Hajnoczi | |
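| | | /* Flush the image by flushing the underlying file */ |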
1318 | 75411d23 | Stefan Hajnoczi | static BlockDriverAIOCB *bdrv_qed_aio_flush(BlockDriverState *bs, |
1319 | 75411d23 | Stefan Hajnoczi | BlockDriverCompletionFunc *cb, |
1320 | 75411d23 | Stefan Hajnoczi | void *opaque) |
1321 | 75411d23 | Stefan Hajnoczi | { |
1322 | 75411d23 | Stefan Hajnoczi | return bdrv_aio_flush(bs->file, cb, opaque); |
1323 | 75411d23 | Stefan Hajnoczi | } |
1324 | 75411d23 | Stefan Hajnoczi | |
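| | | /** |
| | |  * Grow the image: validate the new size, update image_size in the header, |
| | |  * and roll the header back if the synchronous write fails |
| | |  */ |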
1325 | 75411d23 | Stefan Hajnoczi | static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset) |
1326 | 75411d23 | Stefan Hajnoczi | { |
1327 | 77a5a000 | Stefan Hajnoczi | BDRVQEDState *s = bs->opaque; |
1328 | 77a5a000 | Stefan Hajnoczi | uint64_t old_image_size; |
1329 | 77a5a000 | Stefan Hajnoczi | int ret; |
1330 | 77a5a000 | Stefan Hajnoczi | |
1331 | 77a5a000 | Stefan Hajnoczi | if (!qed_is_image_size_valid(offset, s->header.cluster_size, |
1332 | 77a5a000 | Stefan Hajnoczi | s->header.table_size)) { |
1333 | 77a5a000 | Stefan Hajnoczi | return -EINVAL; |
1334 | 77a5a000 | Stefan Hajnoczi | } |
1335 | 77a5a000 | Stefan Hajnoczi | |
1336 | 77a5a000 | Stefan Hajnoczi | /* Shrinking is currently not supported */ |
1337 | 77a5a000 | Stefan Hajnoczi | if ((uint64_t)offset < s->header.image_size) { |
1338 | 77a5a000 | Stefan Hajnoczi | return -ENOTSUP; |
1339 | 77a5a000 | Stefan Hajnoczi | } |
1340 | 77a5a000 | Stefan Hajnoczi | |
1341 | 77a5a000 | Stefan Hajnoczi | old_image_size = s->header.image_size; |
1342 | 77a5a000 | Stefan Hajnoczi | s->header.image_size = offset; |
1343 | 77a5a000 | Stefan Hajnoczi | ret = qed_write_header_sync(s); |
1344 | 77a5a000 | Stefan Hajnoczi | if (ret < 0) { |
1345 | 77a5a000 | Stefan Hajnoczi | s->header.image_size = old_image_size; |
1346 | 77a5a000 | Stefan Hajnoczi | } |
1347 | 77a5a000 | Stefan Hajnoczi | return ret; |
1348 | 75411d23 | Stefan Hajnoczi | } |
1349 | 75411d23 | Stefan Hajnoczi | |
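| | | /* The virtual disk size comes from the header, not the file length */ |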
1350 | 75411d23 | Stefan Hajnoczi | static int64_t bdrv_qed_getlength(BlockDriverState *bs) |
1351 | 75411d23 | Stefan Hajnoczi | { |
1352 | 75411d23 | Stefan Hajnoczi | BDRVQEDState *s = bs->opaque; |
1353 | 75411d23 | Stefan Hajnoczi | return s->header.image_size; |
1354 | 75411d23 | Stefan Hajnoczi | } |
1355 | 75411d23 | Stefan Hajnoczi | |
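| | | /* Report image details such as the cluster size */ |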
1356 | 75411d23 | Stefan Hajnoczi | static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi) |
1357 | 75411d23 | Stefan Hajnoczi | { |
1358 | 75411d23 | Stefan Hajnoczi | BDRVQEDState *s = bs->opaque; |
1359 | 75411d23 | Stefan Hajnoczi | |
1360 | 75411d23 | Stefan Hajnoczi | memset(bdi, 0, sizeof(*bdi)); |
1361 | 75411d23 | Stefan Hajnoczi | bdi->cluster_size = s->header.cluster_size; |
1362 | 75411d23 | Stefan Hajnoczi | return 0; |
1363 | 75411d23 | Stefan Hajnoczi | } |
1364 | 75411d23 | Stefan Hajnoczi | |
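| | | /** |
| | |  * Rewrite the header in place with a new backing filename and format, |
| | |  * updating the QED_F_BACKING_FILE/QED_F_BACKING_FORMAT_NO_PROBE flags |
| | |  */ |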
1365 | 75411d23 | Stefan Hajnoczi | static int bdrv_qed_change_backing_file(BlockDriverState *bs, |
1366 | 75411d23 | Stefan Hajnoczi | const char *backing_file, |
1367 | 75411d23 | Stefan Hajnoczi | const char *backing_fmt) |
1368 | 75411d23 | Stefan Hajnoczi | { |
1369 | 75411d23 | Stefan Hajnoczi | BDRVQEDState *s = bs->opaque; |
1370 | 75411d23 | Stefan Hajnoczi | QEDHeader new_header, le_header; |
1371 | 75411d23 | Stefan Hajnoczi | void *buffer; |
1372 | 75411d23 | Stefan Hajnoczi | size_t buffer_len, backing_file_len; |
1373 | 75411d23 | Stefan Hajnoczi | int ret; |
1374 | 75411d23 | Stefan Hajnoczi | |
1375 | 75411d23 | Stefan Hajnoczi | /* Refuse to set backing filename if unknown compat feature bits are |
1376 | 75411d23 | Stefan Hajnoczi | * active. If the image uses an unknown compat feature then we may not |
1377 | 75411d23 | Stefan Hajnoczi | * know the layout of data following the header structure and cannot safely |
1378 | 75411d23 | Stefan Hajnoczi | * add a new string. |
1379 | 75411d23 | Stefan Hajnoczi | */ |
1380 | 75411d23 | Stefan Hajnoczi | if (backing_file && (s->header.compat_features & |
1381 | 75411d23 | Stefan Hajnoczi | ~QED_COMPAT_FEATURE_MASK)) { |
1382 | 75411d23 | Stefan Hajnoczi | return -ENOTSUP; |
1383 | 75411d23 | Stefan Hajnoczi | } |
1384 | 75411d23 | Stefan Hajnoczi | |
1385 | 75411d23 | Stefan Hajnoczi | memcpy(&new_header, &s->header, sizeof(new_header)); |
1386 | 75411d23 | Stefan Hajnoczi | |
1387 | 75411d23 | Stefan Hajnoczi | new_header.features &= ~(QED_F_BACKING_FILE | |
1388 | 75411d23 | Stefan Hajnoczi | QED_F_BACKING_FORMAT_NO_PROBE); |
1389 | 75411d23 | Stefan Hajnoczi | |
1390 | 75411d23 | Stefan Hajnoczi | /* Adjust feature flags */ |
1391 | 75411d23 | Stefan Hajnoczi | if (backing_file) { |
1392 | 75411d23 | Stefan Hajnoczi | new_header.features |= QED_F_BACKING_FILE; |
1393 | 75411d23 | Stefan Hajnoczi | |
1394 | 75411d23 | Stefan Hajnoczi | if (qed_fmt_is_raw(backing_fmt)) { |
1395 | 75411d23 | Stefan Hajnoczi | new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE; |
1396 | 75411d23 | Stefan Hajnoczi | } |
1397 | 75411d23 | Stefan Hajnoczi | } |
1398 | 75411d23 | Stefan Hajnoczi | |
1399 | 75411d23 | Stefan Hajnoczi | /* Calculate new header size */ |
1400 | 75411d23 | Stefan Hajnoczi | backing_file_len = 0; |
1401 | 75411d23 | Stefan Hajnoczi | |
1402 | 75411d23 | Stefan Hajnoczi | if (backing_file) { |
1403 | 75411d23 | Stefan Hajnoczi | backing_file_len = strlen(backing_file); |
1404 | 75411d23 | Stefan Hajnoczi | } |
1405 | 75411d23 | Stefan Hajnoczi | |
1406 | 75411d23 | Stefan Hajnoczi | buffer_len = sizeof(new_header); |
1407 | 75411d23 | Stefan Hajnoczi | new_header.backing_filename_offset = buffer_len; |
1408 | 75411d23 | Stefan Hajnoczi | new_header.backing_filename_size = backing_file_len; |
1409 | 75411d23 | Stefan Hajnoczi | buffer_len += backing_file_len; |
1410 | 75411d23 | Stefan Hajnoczi | |
1411 | 75411d23 | Stefan Hajnoczi | /* Make sure we can rewrite header without failing */ |
1412 | 75411d23 | Stefan Hajnoczi | if (buffer_len > new_header.header_size * new_header.cluster_size) { |
1413 | 75411d23 | Stefan Hajnoczi | return -ENOSPC; |
1414 | 75411d23 | Stefan Hajnoczi | } |
1415 | 75411d23 | Stefan Hajnoczi | |
1416 | 75411d23 | Stefan Hajnoczi | /* Prepare new header */ |
1417 | 7267c094 | Anthony Liguori | buffer = g_malloc(buffer_len); |
1418 | 75411d23 | Stefan Hajnoczi | |
1419 | 75411d23 | Stefan Hajnoczi | qed_header_cpu_to_le(&new_header, &le_header); |
1420 | 75411d23 | Stefan Hajnoczi | memcpy(buffer, &le_header, sizeof(le_header)); |
1421 | 75411d23 | Stefan Hajnoczi | buffer_len = sizeof(le_header); |
1422 | 75411d23 | Stefan Hajnoczi | |
1423 | 75411d23 | Stefan Hajnoczi | memcpy(buffer + buffer_len, backing_file, backing_file_len); |
1424 | 75411d23 | Stefan Hajnoczi | buffer_len += backing_file_len; |
1425 | 75411d23 | Stefan Hajnoczi | |
1426 | 75411d23 | Stefan Hajnoczi | /* Write new header */ |
1427 | 75411d23 | Stefan Hajnoczi | ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len); |
1428 | 7267c094 | Anthony Liguori | g_free(buffer); |
1429 | 75411d23 | Stefan Hajnoczi | if (ret == 0) { |
1430 | 75411d23 | Stefan Hajnoczi | memcpy(&s->header, &new_header, sizeof(new_header)); |
1431 | 75411d23 | Stefan Hajnoczi | } |
1432 | 75411d23 | Stefan Hajnoczi | return ret; |
1433 | 75411d23 | Stefan Hajnoczi | } |
1434 | 75411d23 | Stefan Hajnoczi | |
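| | | /* Image consistency check, implemented by qed_check() */ |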
1435 | 75411d23 | Stefan Hajnoczi | static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result) |
1436 | 75411d23 | Stefan Hajnoczi | { |
1437 | 01979a98 | Stefan Hajnoczi | BDRVQEDState *s = bs->opaque; |
1438 | 01979a98 | Stefan Hajnoczi | |
1439 | 01979a98 | Stefan Hajnoczi | return qed_check(s, result, false); |
1440 | 75411d23 | Stefan Hajnoczi | } |
1441 | 75411d23 | Stefan Hajnoczi | |
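| | | /* Creation options accepted by bdrv_qed_create() */ |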
1442 | 75411d23 | Stefan Hajnoczi | static QEMUOptionParameter qed_create_options[] = { |
1443 | 75411d23 | Stefan Hajnoczi | { |
1444 | 75411d23 | Stefan Hajnoczi | .name = BLOCK_OPT_SIZE, |
1445 | 75411d23 | Stefan Hajnoczi | .type = OPT_SIZE, |
1446 | 75411d23 | Stefan Hajnoczi | .help = "Virtual disk size (in bytes)" |
1447 | 75411d23 | Stefan Hajnoczi | }, { |
1448 | 75411d23 | Stefan Hajnoczi | .name = BLOCK_OPT_BACKING_FILE, |
1449 | 75411d23 | Stefan Hajnoczi | .type = OPT_STRING, |
1450 | 75411d23 | Stefan Hajnoczi | .help = "File name of a base image" |
1451 | 75411d23 | Stefan Hajnoczi | }, { |
1452 | 75411d23 | Stefan Hajnoczi | .name = BLOCK_OPT_BACKING_FMT, |
1453 | 75411d23 | Stefan Hajnoczi | .type = OPT_STRING, |
1454 | 75411d23 | Stefan Hajnoczi | .help = "Image format of the base image" |
1455 | 75411d23 | Stefan Hajnoczi | }, { |
1456 | 75411d23 | Stefan Hajnoczi | .name = BLOCK_OPT_CLUSTER_SIZE, |
1457 | 75411d23 | Stefan Hajnoczi | .type = OPT_SIZE, |
1458 | 99cce9fa | Kevin Wolf | .help = "Cluster size (in bytes)", |
1459 | 99cce9fa | Kevin Wolf | .value = { .n = QED_DEFAULT_CLUSTER_SIZE }, |
1460 | 75411d23 | Stefan Hajnoczi | }, { |
1461 | 75411d23 | Stefan Hajnoczi | .name = BLOCK_OPT_TABLE_SIZE, |
1462 | 75411d23 | Stefan Hajnoczi | .type = OPT_SIZE, |
1463 | 75411d23 | Stefan Hajnoczi | .help = "L1/L2 table size (in clusters)" |
1464 | 75411d23 | Stefan Hajnoczi | }, |
1465 | 75411d23 | Stefan Hajnoczi | { /* end of list */ } |
1466 | 75411d23 | Stefan Hajnoczi | }; |
1467 | 75411d23 | Stefan Hajnoczi | |
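| | | /* BlockDriver callback table for the QED format */ |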
1468 | 75411d23 | Stefan Hajnoczi | static BlockDriver bdrv_qed = { |
1469 | 75411d23 | Stefan Hajnoczi | .format_name = "qed", |
1470 | 75411d23 | Stefan Hajnoczi | .instance_size = sizeof(BDRVQEDState), |
1471 | 75411d23 | Stefan Hajnoczi | .create_options = qed_create_options, |
1472 | 75411d23 | Stefan Hajnoczi | |
1473 | 75411d23 | Stefan Hajnoczi | .bdrv_probe = bdrv_qed_probe, |
1474 | 75411d23 | Stefan Hajnoczi | .bdrv_open = bdrv_qed_open, |
1475 | 75411d23 | Stefan Hajnoczi | .bdrv_close = bdrv_qed_close, |
1476 | 75411d23 | Stefan Hajnoczi | .bdrv_create = bdrv_qed_create, |
1477 | 75411d23 | Stefan Hajnoczi | .bdrv_is_allocated = bdrv_qed_is_allocated, |
1478 | 75411d23 | Stefan Hajnoczi | .bdrv_make_empty = bdrv_qed_make_empty, |
1479 | 75411d23 | Stefan Hajnoczi | .bdrv_aio_readv = bdrv_qed_aio_readv, |
1480 | 75411d23 | Stefan Hajnoczi | .bdrv_aio_writev = bdrv_qed_aio_writev, |
1481 | 75411d23 | Stefan Hajnoczi | .bdrv_aio_flush = bdrv_qed_aio_flush, |
1482 | 75411d23 | Stefan Hajnoczi | .bdrv_truncate = bdrv_qed_truncate, |
1483 | 75411d23 | Stefan Hajnoczi | .bdrv_getlength = bdrv_qed_getlength, |
1484 | 75411d23 | Stefan Hajnoczi | .bdrv_get_info = bdrv_qed_get_info, |
1485 | 75411d23 | Stefan Hajnoczi | .bdrv_change_backing_file = bdrv_qed_change_backing_file, |
1486 | 75411d23 | Stefan Hajnoczi | .bdrv_check = bdrv_qed_check, |
1487 | 75411d23 | Stefan Hajnoczi | }; |
1488 | 75411d23 | Stefan Hajnoczi | |
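| | | /* Register the QED driver with the block layer at startup */ |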
1489 | 75411d23 | Stefan Hajnoczi | static void bdrv_qed_init(void) |
1490 | 75411d23 | Stefan Hajnoczi | { |
1491 | 75411d23 | Stefan Hajnoczi | bdrv_register(&bdrv_qed); |
1492 | 75411d23 | Stefan Hajnoczi | } |
1493 | 75411d23 | Stefan Hajnoczi | |
1494 | 75411d23 | Stefan Hajnoczi | block_init(bdrv_qed_init); |