/*
 * xen paravirt block device backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

static int syncwrite    = 0;
static int batch_maps   = 0;

static int max_requests = 32;
static int use_aio      = 1;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
};
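
/*
 * Per-device state: xenstore configuration, the mapped shared ring,
 * the request lists and the qemu block driver instance backing it.
 */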
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */
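
/*
 * Request lifecycle: ioreq_start() hands out a struct ioreq (reusing
 * the freelist, allocating up to max_requests), ioreq_finish() moves
 * it from the inflight to the finished list once I/O is done, and
 * ioreq_release() wipes it and returns it to the freelist after the
 * response has been sent.
 */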
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests)
            goto out;
        /* allocate new struct */
        ioreq = qemu_mallocz(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        if (!syncwrite)
            ioreq->presync = ioreq->postsync = 1;
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (syncwrite)
            ioreq->postsync = 1;
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
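
/*
 * Grant mapping: map (and later unmap) the frontend's granted pages
 * into our address space, either with one batched xc_gnttab call for
 * all segments or one call per segment.  The iovec base addresses,
 * which ioreq_parse() filled with in-page offsets, are turned into
 * real pointers here.
 */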
static void ioreq_unmap(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return;
    if (batch_maps) {
        if (!ioreq->pages)
            return;
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0)
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i])
                continue;
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0)
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

static int ioreq_map(struct ioreq *ioreq)
{
    int gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0)
        return 0;
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++)
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}
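
/*
 * Synchronous I/O path (use_aio == 0): map the grants, then read or
 * write each iovec element with bdrv_read()/bdrv_write(), flushing
 * before/after as requested by presync/postsync.
 */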
static int ioreq_runio_qemu_sync(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    int i, rc, len = 0;
    off_t pos;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1)
        goto err_no_map;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs);

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_read(blkdev->bs, pos / BLOCK_SIZE,
                           ioreq->v.iov[i].iov_base,
                           ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "rd I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments)
            break;
        pos = ioreq->start;
        for (i = 0; i < ioreq->v.niov; i++) {
            rc = bdrv_write(blkdev->bs, pos / BLOCK_SIZE,
                            ioreq->v.iov[i].iov_base,
                            ioreq->v.iov[i].iov_len / BLOCK_SIZE);
            if (rc != 0) {
                xen_be_printf(&blkdev->xendev, 0, "wr I/O error (%p, len %zd)\n",
                              ioreq->v.iov[i].iov_base,
                              ioreq->v.iov[i].iov_len);
                goto err;
            }
            len += ioreq->v.iov[i].iov_len;
            pos += ioreq->v.iov[i].iov_len;
        }
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs);
    ioreq->status = BLKIF_RSP_OKAY;

    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
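
/*
 * Asynchronous I/O path (use_aio == 1): submit the whole iovec with
 * bdrv_aio_readv()/bdrv_aio_writev().  aio_inflight starts at one so
 * the completion callback only finishes the request after
 * ioreq_runio_qemu_aio() has dropped its own reference via the final
 * qemu_aio_complete(ioreq, 0) call.
 */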
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0)
        return;

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1)
        goto err_no_map;

    ioreq->aio_inflight++;
    if (ioreq->presync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments)
            break;
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync)
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}
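
/*
 * Build the blkif response for one finished request and place it on
 * the shared ring, using the layout matching the negotiated protocol.
 * Returns nonzero if the frontend needs an event-channel notification.
 */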
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests)
        blkdev->more_work++;
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify)
        xen_be_send_notify(&blkdev->xendev);
}
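
/*
 * Copy the request at ring index 'rc' into ioreq->req, converting
 * from the 32/64-bit frontend layouts to the native one if needed.
 */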
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}
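
/*
 * Main work loop, run from the bottom half: consume requests from the
 * shared ring, parse and execute them (aio or sync), and reschedule
 * the bottom half if more work is pending and request slots are free.
 */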
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (use_aio)
        blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc))
            break;
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq))
                xen_be_send_notify(&blkdev->xendev);
            ioreq_release(ioreq);
            continue;
        }

        if (use_aio) {
            /* run i/o in aio mode */
            ioreq_runio_qemu_aio(ioreq);
        } else {
            /* run i/o in sync mode */
            ioreq_runio_qemu_sync(ioreq);
        }
    }
    if (!use_aio)
        blk_send_response_all(blkdev);

    if (blkdev->more_work && blkdev->requests_inflight < max_requests)
        qemu_bh_schedule(blkdev->bh);
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}
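
/*
 * xen backend driver callbacks (see xen_blkdev_ops at the end of the
 * file): blk_alloc() sets up the lists and bottom half, blk_init()
 * reads the xenstore configuration and opens the image, blk_connect()
 * maps the shared ring and binds the event channel, blk_disconnect()
 * and blk_free() tear everything down again.
 */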
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE)
        batch_maps = 1;
}
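
/*
 * Read the backend configuration from xenstore, open (or look up) the
 * qemu block driver instance and publish disk geometry and features
 * back to xenstore for the frontend.
 */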
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, have_barriers, info = 0;
    char *h;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        h = strchr(blkdev->params, ':');
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (blkdev->mode == NULL)
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    if (blkdev->type == NULL)
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    if (blkdev->dev == NULL)
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    if (blkdev->devtype == NULL)
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL)
        return -1;

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags = BDRV_O_RDWR;
    } else {
        qflags = 0;
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom"))
        info |= VDISK_CDROM;

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                      bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
            bdrv_delete(blkdev->bs);
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }
    have_barriers = blkdev->bs->drv && blkdev->bs->drv->bdrv_flush ? 1 : 0;

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", have_barriers);
    xenstore_write_be_int(&blkdev->xendev, "info", info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;
}
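
/*
 * Connect to the frontend: read ring-ref and event-channel from
 * xenstore, map the shared ring page via the grant table, initialize
 * the back ring for the negotiated protocol and bind the event channel.
 */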
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1)
        return -1;
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1)
        return -1;

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0)
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring)
        return -1;
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourself */
            bdrv_close(blkdev->bs);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        qemu_free(ioreq);
    }

    qemu_free(blkdev->params);
    qemu_free(blkdev->mode);
    qemu_free(blkdev->type);
    qemu_free(blkdev->dev);
    qemu_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .connect    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};