hw/virtio-blk.c @ 0d09e41a

/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/block/block.h"
#include "sysemu/blockdev.h"
#include "hw/virtio/virtio-blk.h"
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
# include "dataplane/virtio-blk.h"
#endif
#include "block/scsi.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"

typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;
    VirtQueueElement elem;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr *out;
    struct virtio_scsi_inhdr *scsi;
    QEMUIOVector qiov;
    struct VirtIOBlockReq *next;
    BlockAcctCookie acct;
} VirtIOBlockReq;

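/* Complete a request: store the status byte into the guest-supplied inhdr,
 * push the descriptor chain back onto the virtqueue and notify the guest. */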
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(vdev, s->vq);
}

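/* Apply the drive's error policy to a failed request.  On BDRV_ACTION_STOP
 * the request is parked on s->rq so it can be retried when the VM resumes;
 * on BDRV_ACTION_REPORT the error is returned to the guest.  Returns
 * nonzero unless the error should be ignored, in which case the caller
 * completes the request as if it had succeeded. */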
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read)
{
    BlockErrorAction action = bdrv_get_error_action(req->dev->bs, is_read, error);
    VirtIOBlock *s = req->dev;

    if (action == BDRV_ACTION_STOP) {
        req->next = s->rq;
        s->rq = req;
    } else if (action == BDRV_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        bdrv_acct_done(s->bs, &req->acct);
        g_free(req);
    }

    bdrv_error_action(s->bs, action, is_read, error);
    return action != BDRV_ACTION_IGNORE;
}

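/* AIO completion callback shared by reads and by each write in a
 * submitted multiwrite batch. */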
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    trace_virtio_blk_rw_complete(req, ret);

    if (ret) {
        bool is_read = !(ldl_p(&req->out->type) & VIRTIO_BLK_T_OUT);
        if (virtio_blk_handle_rw_error(req, -ret, is_read))
            return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    bdrv_acct_done(req->dev->bs, &req->acct);
    g_free(req);
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    bdrv_acct_done(req->dev->bs, &req->acct);
    g_free(req);
}

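/* Request allocation: virtio_blk_get_request() pops the next descriptor
 * chain off the virtqueue, returning NULL when the queue is empty. */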
static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = g_malloc(sizeof(*req));
    req->dev = s;
    req->qiov.size = 0;
    req->next = NULL;
    return req;
}

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (req != NULL) {
        if (!virtqueue_pop(s->vq, &req->elem)) {
            g_free(req);
            return NULL;
        }
    }

    return req;
}

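/* SG_IO passthrough (VIRTIO_BLK_T_SCSI_CMD).  The guest lays the request
 * out as: out_sg[0] = virtio_blk_outhdr, out_sg[1] = SCSI CDB, further
 * output segments = write payload; the last three input segments are the
 * sense buffer, the virtio_scsi_inhdr and the virtio_blk_inhdr, with any
 * read payload in the input segments before them. */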
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
#ifdef __linux__
    int ret;
    int i;
#endif
    int status = VIRTIO_BLK_S_OK;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        g_free(req);
        return;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (!req->dev->blk.scsi) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    struct sg_io_hdr hdr;
    memset(&hdr, 0, sizeof(struct sg_io_hdr));
    hdr.interface_id = 'S';
    hdr.cmd_len = req->elem.out_sg[1].iov_len;
    hdr.cmdp = req->elem.out_sg[1].iov_base;
    hdr.dxfer_len = 0;

    if (req->elem.out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        hdr.dxfer_direction = SG_DXFER_TO_DEV;
        hdr.iovec_count = req->elem.out_num - 2;

        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;

        hdr.dxferp = req->elem.out_sg + 2;

    } else if (req->elem.in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.iovec_count = req->elem.in_num - 3;
        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.in_sg[i].iov_len;

        hdr.dxferp = req->elem.in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        hdr.dxfer_direction = SG_DXFER_NONE;
    }

    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;

    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
    if (ret) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr.status == 0 && hdr.sb_len_wr > 0) {
        hdr.status = CHECK_CONDITION;
    }

    stl_p(&req->scsi->errors,
          hdr.status | (hdr.msg_status << 8) |
          (hdr.host_status << 16) | (hdr.driver_status << 24));
    stl_p(&req->scsi->residual, hdr.resid);
    stl_p(&req->scsi->sense_len, hdr.sb_len_wr);
    stl_p(&req->scsi->data_len, hdr.dxfer_len);

    virtio_blk_req_complete(req, status);
    g_free(req);
    return;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest. */
    stl_p(&req->scsi->errors, 255);
    virtio_blk_req_complete(req, status);
    g_free(req);
}

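/* Writes are collected into a MultiReqBuffer and submitted in a single
 * bdrv_aio_multiwrite() call, so the block layer can merge adjacent
 * requests where possible. */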
typedef struct MultiReqBuffer {
    BlockRequest blkreq[32];
    unsigned int num_writes;
} MultiReqBuffer;

static void virtio_submit_multiwrite(BlockDriverState *bs, MultiReqBuffer *mrb)
{
    int i, ret;

    if (!mrb->num_writes) {
        return;
    }

    ret = bdrv_aio_multiwrite(bs, mrb->blkreq, mrb->num_writes);
    if (ret != 0) {
        for (i = 0; i < mrb->num_writes; i++) {
            if (mrb->blkreq[i].error) {
                virtio_blk_rw_complete(mrb->blkreq[i].opaque, -EIO);
            }
        }
    }

    mrb->num_writes = 0;
}

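/* A flush must not overtake writes still sitting in the batch buffer, so
 * the pending multiwrite is submitted before bdrv_aio_flush(). */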
static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    bdrv_acct_start(req->dev->bs, &req->acct, 0, BDRV_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    virtio_submit_multiwrite(req->dev->bs, mrb);
    bdrv_aio_flush(req->dev->bs, virtio_blk_flush_complete, req);
}

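/* Queue a guest write into the multiwrite batch.  Requests not aligned to
 * the device's sector mask or logical block size are failed with EIO; a
 * full batch (32 entries) is submitted before appending. */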
static void virtio_blk_handle_write(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    BlockRequest *blkreq;
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_WRITE);

    trace_virtio_blk_handle_write(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }

    if (mrb->num_writes == 32) {
        virtio_submit_multiwrite(req->dev->bs, mrb);
    }

    blkreq = &mrb->blkreq[mrb->num_writes];
    blkreq->sector = sector;
    blkreq->nb_sectors = req->qiov.size / BDRV_SECTOR_SIZE;
    blkreq->qiov = &req->qiov;
    blkreq->cb = virtio_blk_rw_complete;
    blkreq->opaque = req;
    blkreq->error = 0;

    mrb->num_writes++;
}

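/* Reads bypass the write batching and go to the block layer individually
 * via bdrv_aio_readv(), after the same alignment checks as writes. */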
static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    uint64_t sector;

    sector = ldq_p(&req->out->sector);

    bdrv_acct_start(req->dev->bs, &req->acct, req->qiov.size, BDRV_ACCT_READ);

    trace_virtio_blk_handle_read(req, sector, req->qiov.size / 512);

    if (sector & req->dev->sector_mask) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    if (req->qiov.size % req->dev->conf->logical_block_size) {
        virtio_blk_rw_complete(req, -EIO);
        return;
    }
    bdrv_aio_readv(req->dev->bs, sector, &req->qiov,
                   req->qiov.size / BDRV_SECTOR_SIZE,
                   virtio_blk_rw_complete, req);
}

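/* Parse one request: the virtio_blk_outhdr is the first output segment and
 * the one-byte-status virtio_blk_inhdr is the last input segment.  Dispatch
 * on the type field; VIRTIO_BLK_T_IN is 0, so reads are matched by equality
 * rather than by bit test. */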
static void virtio_blk_handle_request(VirtIOBlockReq *req,
    MultiReqBuffer *mrb)
{
    uint32_t type;

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        error_report("virtio-blk missing headers");
        exit(1);
    }

    if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
        req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
        error_report("virtio-blk header not in correct element");
        exit(1);
    }

    req->out = (void *)req->elem.out_sg[0].iov_base;
    req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

    type = ldl_p(&req->out->type);

    if (type & VIRTIO_BLK_T_FLUSH) {
        virtio_blk_handle_flush(req, mrb);
    } else if (type & VIRTIO_BLK_T_SCSI_CMD) {
        virtio_blk_handle_scsi(req);
    } else if (type & VIRTIO_BLK_T_GET_ID) {
        VirtIOBlock *s = req->dev;

        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        strncpy(req->elem.in_sg[0].iov_base,
                s->blk.serial ? s->blk.serial : "",
                MIN(req->elem.in_sg[0].iov_len, VIRTIO_BLK_ID_BYTES));
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        g_free(req);
    } else if (type & VIRTIO_BLK_T_OUT) {
        qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                 req->elem.out_num - 1);
        virtio_blk_handle_write(req, mrb);
    } else if (type == VIRTIO_BLK_T_IN || type == VIRTIO_BLK_T_BARRIER) {
        /* VIRTIO_BLK_T_IN is 0, so we can't just & it. */
        qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
                                 req->elem.in_num - 1);
        virtio_blk_handle_read(req);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        g_free(req);
    }
}

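/* Virtqueue kick handler: drain all pending requests, then submit the
 * accumulated write batch once. */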
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
     * dataplane here instead of waiting for .set_status().
     */
    if (s->dataplane) {
        virtio_blk_data_plane_start(s->dataplane);
        return;
    }
#endif

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    virtio_submit_multiwrite(s->bs, &mrb);

    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}

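/* Requests parked by BDRV_ACTION_STOP are replayed from s->rq in a bottom
 * half once the VM starts running again. */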
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {
        .num_writes = 0,
    };

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_request(req, &mrb);
        req = req->next;
    }

    virtio_submit_multiwrite(s->bs, &mrb);
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (s->dataplane) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    bdrv_drain_all();
}

/* coalesce internal state, copy to pci i/o region 0 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int blk_size = s->conf->logical_block_size;

    bdrv_get_geometry(s->bs, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    stq_raw(&blkcfg.capacity, capacity);
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, s->conf->cyls);
    stl_raw(&blkcfg.blk_size, blk_size);
    stw_raw(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
    stw_raw(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
    blkcfg.heads = s->conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is OK for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    if (bdrv_getlength(s->bs) / s->conf->heads / s->conf->secs % blk_size) {
        blkcfg.sectors = s->conf->secs & ~s->sector_mask;
    } else {
        blkcfg.sectors = s->conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(s->conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = bdrv_enable_write_cache(s->bs);
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, sizeof(blkcfg));
    bdrv_set_enable_write_cache(s->bs, blkcfg.wce != 0);
}

static uint32_t virtio_blk_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
    features |= (1 << VIRTIO_BLK_F_TOPOLOGY);
    features |= (1 << VIRTIO_BLK_F_BLK_SIZE);
    features |= (1 << VIRTIO_BLK_F_SCSI);

    if (s->blk.config_wce) {
        features |= (1 << VIRTIO_BLK_F_CONFIG_WCE);
    }
    if (bdrv_enable_write_cache(s->bs))
        features |= (1 << VIRTIO_BLK_F_WCE);

    if (bdrv_is_read_only(s->bs))
        features |= 1 << VIRTIO_BLK_F_RO;

    return features;
}

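/* Driver status change: stop dataplane if the driver drops DRIVER or
 * DRIVER_OK; once DRIVER_OK is set, mirror the negotiated
 * VIRTIO_BLK_F_WCE bit into the block layer's write-cache setting. */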
static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    uint32_t features;

#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
                                    VIRTIO_CONFIG_S_DRIVER_OK))) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
#endif

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    features = vdev->guest_features;
    bdrv_set_enable_write_cache(s->bs, !!(features & (1 << VIRTIO_BLK_F_WCE)));
}

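/* Migration: the list of requests parked on s->rq is saved as a sequence
 * of 1-prefixed elements terminated by a 0 byte.  On load the iovecs are
 * rebuilt with virtqueue_map_sg() because the guest addresses must be
 * remapped into the new process. */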
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    VirtIOBlockReq *req = s->rq;

    virtio_save(vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    int ret;

    if (version_id != 2)
        return -EINVAL;

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req->next = s->rq;
        s->rq = req;

        virtqueue_map_sg(req->elem.in_sg, req->elem.in_addr,
            req->elem.in_num, 1);
        virtqueue_map_sg(req->elem.out_sg, req->elem.out_addr,
            req->elem.out_num, 0);
    }

    return 0;
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    virtio_notify_config(vdev);
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
};

void virtio_blk_set_conf(DeviceState *dev, VirtIOBlkConf *blk)
{
    VirtIOBlock *s = VIRTIO_BLK(dev);
    memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
}

static int virtio_blk_device_init(VirtIODevice *vdev)
{
    DeviceState *qdev = DEVICE(vdev);
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlkConf *blk = &(s->blk);
    static int virtio_blk_id;

    if (!blk->conf.bs) {
        error_report("drive property not set");
        return -1;
    }
    if (!bdrv_is_inserted(blk->conf.bs)) {
        error_report("Device needs media, but drive is empty");
        return -1;
    }

    blkconf_serial(&blk->conf, &blk->serial);
    if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) {
        return -1;
    }

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    vdev->get_config = virtio_blk_update_config;
    vdev->set_config = virtio_blk_set_config;
    vdev->get_features = virtio_blk_get_features;
    vdev->set_status = virtio_blk_set_status;
    vdev->reset = virtio_blk_reset;
    s->bs = blk->conf.bs;
    s->conf = &blk->conf;
    memcpy(&(s->blk), blk, sizeof(struct VirtIOBlkConf));
    s->rq = NULL;
    s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;

    s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    if (!virtio_blk_data_plane_create(vdev, blk, &s->dataplane)) {
        virtio_common_cleanup(vdev);
        return -1;
    }
#endif

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm(qdev, "virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);
    bdrv_set_dev_ops(s->bs, &virtio_block_ops, s);
    bdrv_set_buffer_alignment(s->bs, s->conf->logical_block_size);

    bdrv_iostatus_enable(s->bs);

    add_boot_device_path(s->conf->bootindex, qdev, "/disk@0,0");
    return 0;
}

static int virtio_blk_device_exit(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
#ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
#endif
    qemu_del_vm_change_state_handler(s->change);
    unregister_savevm(dev, "virtio-blk", s);
    blockdev_mark_auto_del(s->bs);
    virtio_common_cleanup(vdev);
    return 0;
}

static Property virtio_blk_properties[] = {
    DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlock, blk),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    dc->exit = virtio_blk_device_exit;
    dc->props = virtio_blk_properties;
    vdc->init = virtio_blk_device_init;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)