/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <qemu-common.h>
#include <sysemu.h>
#include "virtio-blk.h"
#include "block_int.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif

typedef struct VirtIOBlock
{
    VirtIODevice vdev;
    BlockDriverState *bs;
    VirtQueue *vq;
    void *rq;
    char serial_str[BLOCK_SERIAL_STRLEN + 1];
    QEMUBH *bh;
} VirtIOBlock;

static VirtIOBlock *to_virtio_blk(VirtIODevice *vdev)
{
    return (VirtIOBlock *)vdev;
}

/* store identify data in little endian format
 */
static inline void put_le16(uint16_t *p, unsigned int v)
{
    *p = cpu_to_le16(v);
}

/* copy to *dst from *src, nul pad dst tail as needed to len bytes
 */
static inline void padstr(char *dst, const char *src, int len)
{
    while (len--)
        *dst++ = *src ? *src++ : '\0';
}

/* setup simulated identify data as appropriate for virtio block device
 *
 * ref: AT Attachment 8 - ATA/ATAPI Command Set (ATA8-ACS)
 */
static inline void virtio_identify_template(struct virtio_blk_config *bc)
{
    uint16_t *p = &bc->identify[0];
    uint64_t lba_sectors = bc->capacity;

    memset(p, 0, sizeof(bc->identify));
    put_le16(p + 0, 0x0);                            /* ATA device */
    padstr((char *)(p + 23), QEMU_VERSION, 8);       /* firmware revision */
    padstr((char *)(p + 27), "QEMU VIRT_BLK", 40);   /* model# */
    put_le16(p + 47, 0x80ff);                        /* max xfer 255 sectors */
    put_le16(p + 49, 0x0b00);                        /* support IORDY/LBA/DMA */
    put_le16(p + 59, 0x1ff);                         /* cur xfer 255 sectors */
    put_le16(p + 80, 0x1f0);                         /* support ATA8/7/6/5/4 */
    put_le16(p + 81, 0x16);
    put_le16(p + 82, 0x400);
    put_le16(p + 83, 0x400);
    put_le16(p + 100, lba_sectors);
    put_le16(p + 101, lba_sectors >> 16);
    put_le16(p + 102, lba_sectors >> 32);
    put_le16(p + 103, lba_sectors >> 48);
}

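/* In-flight request state: 'elem' holds the guest's scatter/gather
 * descriptors for the lifetime of the request, and 'next' chains requests
 * parked after a write error while the VM is stopped (see
 * virtio_blk_handle_write_error and virtio_blk_dma_restart_bh).
 */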
typedef struct VirtIOBlockReq
{
    VirtIOBlock *dev;
    VirtQueueElement elem;
    struct virtio_blk_inhdr *in;
    struct virtio_blk_outhdr *out;
    struct virtio_scsi_inhdr *scsi;
    QEMUIOVector qiov;
    struct VirtIOBlockReq *next;
} VirtIOBlockReq;

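/* Complete a request: fill in the status byte in the inhdr, return the
 * descriptor chain to the used ring and notify the guest.
 */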
static void virtio_blk_req_complete(VirtIOBlockReq *req, int status)
{
    VirtIOBlock *s = req->dev;

    req->in->status = status;
    virtqueue_push(s->vq, &req->elem, req->qiov.size + sizeof(*req->in));
    virtio_notify(&s->vdev, s->vq);

    qemu_free(req);
}

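/* Apply the drive's configured write-error policy.  Returns nonzero when
 * the request has been consumed (parked for replay with the VM stopped, or
 * completed with VIRTIO_BLK_S_IOERR); returns 0 for the "ignore" policy so
 * the caller reports success to the guest.
 */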
static int virtio_blk_handle_write_error(VirtIOBlockReq *req, int error)
{
    BlockInterfaceErrorAction action = drive_get_onerror(req->dev->bs);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERR_IGNORE)
        return 0;

    if ((error == ENOSPC && action == BLOCK_ERR_STOP_ENOSPC)
            || action == BLOCK_ERR_STOP_ANY) {
        req->next = s->rq;
        s->rq = req;
        vm_stop(0);
    } else {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
    }

    return 1;
}

static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret && (req->out->type & VIRTIO_BLK_T_OUT)) {
        if (virtio_blk_handle_write_error(req, -ret))
            return;
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
}

static VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = qemu_mallocz(sizeof(*req));
    req->dev = s;
    return req;
}

static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (req != NULL) {
        if (!virtqueue_pop(s->vq, &req->elem)) {
            qemu_free(req);
            return NULL;
        }
    }

    return req;
}

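/* SCSI command passthrough via the host's SG_IO ioctl (Linux only).
 * Buffer layout: out_sg[0] carries the virtio_blk_outhdr and out_sg[1] the
 * SCSI command block, with any write payload in the following segments;
 * the input segments end with the sense buffer, the virtio_scsi_inhdr and
 * the virtio_blk_inhdr, in that order.
 */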
#ifdef __linux__
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    struct sg_io_hdr hdr;
    int ret, size = 0;
    int status;
    int i;

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (req->elem.out_num < 2 || req->elem.in_num < 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        return;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (req->elem.out_num > 2 && req->elem.in_num > 3) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        return;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    req->scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;
    size = sizeof(*req->in) + sizeof(*req->scsi);

    memset(&hdr, 0, sizeof(struct sg_io_hdr));
    hdr.interface_id = 'S';
    hdr.cmd_len = req->elem.out_sg[1].iov_len;
    hdr.cmdp = req->elem.out_sg[1].iov_base;
    hdr.dxfer_len = 0;

    if (req->elem.out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        hdr.dxfer_direction = SG_DXFER_TO_DEV;
        hdr.iovec_count = req->elem.out_num - 2;

        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.out_sg[i + 2].iov_len;

        hdr.dxferp = req->elem.out_sg + 2;

    } else if (req->elem.in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        hdr.iovec_count = req->elem.in_num - 3;
        for (i = 0; i < hdr.iovec_count; i++)
            hdr.dxfer_len += req->elem.in_sg[i].iov_len;

        hdr.dxferp = req->elem.in_sg;
        size += hdr.dxfer_len;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        hdr.dxfer_direction = SG_DXFER_NONE;
    }

    hdr.sbp = req->elem.in_sg[req->elem.in_num - 3].iov_base;
    hdr.mx_sb_len = req->elem.in_sg[req->elem.in_num - 3].iov_len;
    size += hdr.mx_sb_len;

    ret = bdrv_ioctl(req->dev->bs, SG_IO, &hdr);
    if (ret) {
        status = VIRTIO_BLK_S_UNSUPP;
        hdr.status = ret;
        hdr.resid = hdr.dxfer_len;
    } else if (hdr.status) {
        status = VIRTIO_BLK_S_IOERR;
    } else {
        status = VIRTIO_BLK_S_OK;
    }

    req->scsi->errors = hdr.status;
    req->scsi->residual = hdr.resid;
    req->scsi->sense_len = hdr.sb_len_wr;
    req->scsi->data_len = hdr.dxfer_len;

    virtio_blk_req_complete(req, status);
}
#else
static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
}
#endif /* __linux__ */

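/* Data path: submit the request's iovec asynchronously.  Virtio block
 * sector numbers are always in units of 512 bytes, so the transfer length
 * is qiov.size / 512 regardless of the host device's block size.
 */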
static void virtio_blk_handle_write(VirtIOBlockReq *req)
{
    bdrv_aio_writev(req->dev->bs, req->out->sector, &req->qiov,
                    req->qiov.size / 512, virtio_blk_rw_complete, req);
}

static void virtio_blk_handle_read(VirtIOBlockReq *req)
{
    bdrv_aio_readv(req->dev->bs, req->out->sector, &req->qiov,
                   req->qiov.size / 512, virtio_blk_rw_complete, req);
}

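/* Virtqueue notification handler: pop each available descriptor chain,
 * check that it starts with a virtio_blk_outhdr and ends with room for the
 * virtio_blk_inhdr status, then dispatch to the SCSI, write or read path.
 */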
static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    VirtIOBlockReq *req;

    while ((req = virtio_blk_get_request(s))) {
        if (req->elem.out_num < 1 || req->elem.in_num < 1) {
            fprintf(stderr, "virtio-blk missing headers\n");
            exit(1);
        }

        if (req->elem.out_sg[0].iov_len < sizeof(*req->out) ||
            req->elem.in_sg[req->elem.in_num - 1].iov_len < sizeof(*req->in)) {
            fprintf(stderr, "virtio-blk header not in correct element\n");
            exit(1);
        }

        req->out = (void *)req->elem.out_sg[0].iov_base;
        req->in = (void *)req->elem.in_sg[req->elem.in_num - 1].iov_base;

        if (req->out->type & VIRTIO_BLK_T_SCSI_CMD) {
            virtio_blk_handle_scsi(req);
        } else if (req->out->type & VIRTIO_BLK_T_OUT) {
            qemu_iovec_init_external(&req->qiov, &req->elem.out_sg[1],
                                     req->elem.out_num - 1);
            virtio_blk_handle_write(req);
        } else {
            qemu_iovec_init_external(&req->qiov, &req->elem.in_sg[0],
                                     req->elem.in_num - 1);
            virtio_blk_handle_read(req);
        }
    }
    /*
     * FIXME: Want to check for completions before returning to guest mode,
     * so cached reads and writes are reported as quickly as possible. But
     * that should be done in the generic block layer.
     */
}

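/* Bottom half scheduled when the VM resumes: resubmit the writes that were
 * parked by the stop-on-error policy.
 */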
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        virtio_blk_handle_write(req);
        req = req->next;
    }
}

static void virtio_blk_dma_restart_cb(void *opaque, int running, int reason)
{
    VirtIOBlock *s = opaque;

    if (!running)
        return;

    if (!s->bh) {
        s->bh = qemu_bh_new(virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    qemu_aio_flush();
}

/* coalesce internal state, copy to pci i/o region 0
 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int cylinders, heads, secs;

    bdrv_get_geometry(s->bs, &capacity);
    bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
    memset(&blkcfg, 0, sizeof(blkcfg));
    stq_raw(&blkcfg.capacity, capacity);
    stl_raw(&blkcfg.seg_max, 128 - 2);
    stw_raw(&blkcfg.cylinders, cylinders);
    blkcfg.heads = heads;
    blkcfg.sectors = secs;
    blkcfg.size_max = 0;
    virtio_identify_template(&blkcfg);
    memcpy(&blkcfg.identify[VIRTIO_BLK_ID_SN], s->serial_str,
           VIRTIO_BLK_ID_SN_BYTES);
    memcpy(config, &blkcfg, sizeof(blkcfg));
}

static uint32_t virtio_blk_get_features(VirtIODevice *vdev)
{
    VirtIOBlock *s = to_virtio_blk(vdev);
    uint32_t features = 0;

    features |= (1 << VIRTIO_BLK_F_SEG_MAX);
    features |= (1 << VIRTIO_BLK_F_GEOMETRY);
#ifdef __linux__
    features |= (1 << VIRTIO_BLK_F_SCSI);
#endif
    if (strcmp(s->serial_str, "0"))
        features |= 1 << VIRTIO_BLK_F_IDENTIFY;

    return features;
}

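/* Migration support: the parked request list is serialized as a sequence
 * of VirtQueueElements, each preceded by a 1 byte and terminated by a
 * 0 byte; virtio_blk_load rebuilds the list so the restart handler can
 * resubmit the requests on the target.
 */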
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;

    virtio_save(&s->vdev, f);

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;

    if (version_id != 2)
        return -EINVAL;

    virtio_load(&s->vdev, f);
    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char*)&req->elem, sizeof(req->elem));
        req->next = s->rq;
        s->rq = req;
    }

    return 0;
}

VirtIODevice *virtio_blk_init(DeviceState *dev) |
418 |
{ |
419 |
VirtIOBlock *s; |
420 |
int cylinders, heads, secs;
|
421 |
static int virtio_blk_id; |
422 |
BlockDriverState *bs; |
423 |
char *ps;
|
424 |
|
425 |
s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
|
426 |
sizeof(struct virtio_blk_config), |
427 |
sizeof(VirtIOBlock));
|
428 |
|
429 |
bs = qdev_init_bdrv(dev, IF_VIRTIO); |
430 |
s->vdev.get_config = virtio_blk_update_config; |
431 |
s->vdev.get_features = virtio_blk_get_features; |
432 |
s->vdev.reset = virtio_blk_reset; |
433 |
s->bs = bs; |
434 |
s->rq = NULL;
|
435 |
if (strlen(ps = (char *)drive_get_serial(bs))) |
436 |
strncpy(s->serial_str, ps, sizeof(s->serial_str));
|
437 |
else
|
438 |
snprintf(s->serial_str, sizeof(s->serial_str), "0"); |
439 |
bs->private = dev; |
440 |
bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs); |
441 |
bdrv_set_geometry_hint(s->bs, cylinders, heads, secs); |
442 |
|
443 |
s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
|
444 |
|
445 |
qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s); |
446 |
register_savevm("virtio-blk", virtio_blk_id++, 2, |
447 |
virtio_blk_save, virtio_blk_load, s); |
448 |
|
449 |
return &s->vdev;
|
450 |
} |