root / block / gluster.c @ f2e5dca4
History | View | Annotate | Download (19.4 kB)
1 |
/*
|
---|---|
2 |
* GlusterFS backend for QEMU
|
3 |
*
|
4 |
* Copyright (C) 2012 Bharata B Rao <bharata@linux.vnet.ibm.com>
|
5 |
*
|
6 |
* Pipe handling mechanism in AIO implementation is derived from
|
7 |
* block/rbd.c. Hence,
|
8 |
*
|
9 |
* Copyright (C) 2010-2011 Christian Brunner <chb@muc.de>,
|
10 |
* Josh Durgin <josh.durgin@dreamhost.com>
|
11 |
*
|
12 |
* This work is licensed under the terms of the GNU GPL, version 2. See
|
13 |
* the COPYING file in the top-level directory.
|
14 |
*
|
15 |
* Contributions after 2012-01-13 are licensed under the terms of the
|
16 |
* GNU GPL, version 2 or (at your option) any later version.
|
17 |
*/
|
18 |
#include <glusterfs/api/glfs.h> |
19 |
#include "block/block_int.h" |
20 |
#include "qemu/sockets.h" |
21 |
#include "qemu/uri.h" |
22 |
|
23 |
/*
 * Tracks one in-flight asynchronous request submitted to gluster.
 * Allocated via qemu_aio_get() and released on completion.
 */
typedef struct GlusterAIOCB {
    BlockDriverAIOCB common;
    int64_t size;       /* bytes requested; compared with ret to detect short I/O */
    int ret;            /* bytes transferred, or negative errno from gluster */
    bool *finished;     /* set on completion so a canceller's wait loop can exit */
    QEMUBH *bh;
} GlusterAIOCB;
30 |
|
31 |
/* Per-BlockDriverState connection state for one gluster-backed image. */
typedef struct BDRVGlusterState {
    struct glfs *glfs;          /* gluster filesystem handle */
    int fds[2];                 /* pipe: gluster threads write completed acb
                                 * pointers, QEMU's event loop reads them */
    struct glfs_fd *fd;         /* open handle to the image file on the volume */
    int event_reader_pos;       /* bytes of the acb pointer read so far
                                 * (pipe reads may be partial) */
    GlusterAIOCB *event_acb;    /* acb pointer being reassembled from the pipe */
} BDRVGlusterState;
38 |
|
39 |
/* Indices into BDRVGlusterState.fds for the completion-notification pipe. */
#define GLUSTER_FD_READ 0
#define GLUSTER_FD_WRITE 1
41 |
|
42 |
/*
 * Connection parameters parsed out of a gluster[+transport]://... URI.
 * All strings are heap-allocated; freed by qemu_gluster_gconf_free().
 */
typedef struct GlusterConf {
    char *server;       /* hostname/IP, or unix socket path for transport "unix" */
    int port;           /* glusterd port; 0 lets gluster pick its default */
    char *volname;      /* gluster volume containing the image */
    char *image;        /* path of the image within the volume */
    char *transport;    /* "tcp", "unix" or "rdma" */
} GlusterConf;
49 |
|
50 |
/*
 * Release a GlusterConf and all of its owned strings.
 *
 * Accepts NULL (no-op), matching the g_free()/glib convention for
 * *_free() functions, so error paths can call it unconditionally.
 */
static void qemu_gluster_gconf_free(GlusterConf *gconf)
{
    if (!gconf) {
        return;
    }
    g_free(gconf->server);
    g_free(gconf->volname);
    g_free(gconf->image);
    g_free(gconf->transport);
    g_free(gconf);
}
58 |
|
59 |
/*
 * Split a URI path of the form "/volname/image..." into gconf->volname
 * and gconf->image. Returns 0 on success, -EINVAL if either component
 * is missing.
 */
static int parse_volume_options(GlusterConf *gconf, char *path)
{
    char *vol_start, *vol_end;

    if (!path) {
        return -EINVAL;
    }

    /* Skip leading slashes; the volume name runs up to the next '/'. */
    vol_start = path + strspn(path, "/");
    vol_end = vol_start + strcspn(vol_start, "/");
    if (*vol_end == '\0') {
        return -EINVAL;
    }
    gconf->volname = g_strndup(vol_start, vol_end - vol_start);

    /* Whatever follows the separating slashes is the image path. */
    vol_end += strspn(vol_end, "/");
    if (*vol_end == '\0') {
        return -EINVAL;
    }
    gconf->image = g_strdup(vol_end);
    return 0;
}
83 |
|
84 |
/*
|
85 |
* file=gluster[+transport]://[server[:port]]/volname/image[?socket=...]
|
86 |
*
|
87 |
* 'gluster' is the protocol.
|
88 |
*
|
89 |
* 'transport' specifies the transport type used to connect to gluster
|
90 |
* management daemon (glusterd). Valid transport types are
|
91 |
* tcp, unix and rdma. If a transport type isn't specified, then tcp
|
92 |
* type is assumed.
|
93 |
*
|
94 |
* 'server' specifies the server where the volume file specification for
|
95 |
* the given volume resides. This can be either hostname, ipv4 address
|
96 |
* or ipv6 address. ipv6 address needs to be within square brackets [ ].
|
97 |
 * If transport type is 'unix', then 'server' field should not be specified.
|
98 |
* The 'socket' field needs to be populated with the path to unix domain
|
99 |
* socket.
|
100 |
*
|
101 |
* 'port' is the port number on which glusterd is listening. This is optional
|
102 |
 * and if not specified, QEMU will send 0 which will make gluster use the
|
103 |
* default port. If the transport type is unix, then 'port' should not be
|
104 |
* specified.
|
105 |
*
|
106 |
* 'volname' is the name of the gluster volume which contains the VM image.
|
107 |
*
|
108 |
* 'image' is the path to the actual VM image that resides on gluster volume.
|
109 |
*
|
110 |
* Examples:
|
111 |
*
|
112 |
* file=gluster://1.2.3.4/testvol/a.img
|
113 |
* file=gluster+tcp://1.2.3.4/testvol/a.img
|
114 |
* file=gluster+tcp://1.2.3.4:24007/testvol/dir/a.img
|
115 |
* file=gluster+tcp://[1:2:3:4:5:6:7:8]/testvol/dir/a.img
|
116 |
* file=gluster+tcp://[1:2:3:4:5:6:7:8]:24007/testvol/dir/a.img
|
117 |
* file=gluster+tcp://server.domain.com:24007/testvol/dir/a.img
|
118 |
* file=gluster+unix:///testvol/dir/a.img?socket=/tmp/glusterd.socket
|
119 |
* file=gluster+rdma://1.2.3.4:24007/testvol/a.img
|
120 |
*/
|
121 |
/*
 * Parse a gluster[+transport]://[server[:port]]/volname/image[?socket=...]
 * URI into *gconf. Returns 0 on success, -EINVAL on any malformed input.
 * On failure, fields already assigned remain owned by gconf (caller frees
 * via qemu_gluster_gconf_free()).
 */
static int qemu_gluster_parseuri(GlusterConf *gconf, const char *filename)
{
    URI *uri;
    QueryParams *qp = NULL;
    bool is_unix = false;
    int ret = 0;

    uri = uri_parse(filename);
    if (!uri) {
        return -EINVAL;
    }

    /* transport: derived from the URI scheme; plain "gluster" means tcp */
    if (!strcmp(uri->scheme, "gluster")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+tcp")) {
        gconf->transport = g_strdup("tcp");
    } else if (!strcmp(uri->scheme, "gluster+unix")) {
        gconf->transport = g_strdup("unix");
        is_unix = true;
    } else if (!strcmp(uri->scheme, "gluster+rdma")) {
        gconf->transport = g_strdup("rdma");
    } else {
        ret = -EINVAL;
        goto out;
    }

    ret = parse_volume_options(gconf, uri->path);
    if (ret < 0) {
        goto out;
    }

    /*
     * Exactly one query parameter (socket=...) is required for unix
     * transport, and none are allowed otherwise.
     */
    qp = query_params_parse(uri->query);
    if (qp->n > 1 || (is_unix && !qp->n) || (!is_unix && qp->n)) {
        ret = -EINVAL;
        goto out;
    }

    if (is_unix) {
        /* unix transport: no host/port; "server" carries the socket path */
        if (uri->server || uri->port) {
            ret = -EINVAL;
            goto out;
        }
        if (strcmp(qp->p[0].name, "socket")) {
            ret = -EINVAL;
            goto out;
        }
        gconf->server = g_strdup(qp->p[0].value);
    } else {
        gconf->server = g_strdup(uri->server);
        gconf->port = uri->port;
    }

out:
    if (qp) {
        query_params_free(qp);
    }
    uri_free(uri);
    return ret;
}
181 |
|
182 |
/*
 * Parse 'filename' into *gconf and establish a connection to the gluster
 * volume. Returns an initialized glfs handle on success; on failure
 * returns NULL with errno set (caller reads -errno for the error code).
 */
static struct glfs *qemu_gluster_init(GlusterConf *gconf, const char *filename)
{
    struct glfs *glfs = NULL;
    int ret;
    int old_errno;

    ret = qemu_gluster_parseuri(gconf, filename);
    if (ret < 0) {
        error_report("Usage: file=gluster[+transport]://[server[:port]]/"
            "volname/image[?socket=...]");
        errno = -ret;
        goto out;
    }

    glfs = glfs_new(gconf->volname);
    if (!glfs) {
        goto out;
    }

    ret = glfs_set_volfile_server(glfs, gconf->transport, gconf->server,
            gconf->port);
    if (ret < 0) {
        goto out;
    }

    /*
     * TODO: Use GF_LOG_ERROR instead of hard code value of 4 here when
     * GlusterFS makes GF_LOG_* macros available to libgfapi users.
     */
    ret = glfs_set_logging(glfs, "-", 4);
    if (ret < 0) {
        goto out;
    }

    ret = glfs_init(glfs);
    if (ret) {
        error_report("Gluster connection failed for server=%s port=%d "
             "volume=%s image=%s transport=%s", gconf->server, gconf->port,
             gconf->volname, gconf->image, gconf->transport);
        goto out;
    }
    return glfs;

out:
    if (glfs) {
        /* glfs_fini() may clobber errno; preserve the original cause */
        old_errno = errno;
        glfs_fini(glfs);
        errno = old_errno;
    }
    return NULL;
}
233 |
|
234 |
/*
 * Finish an AIO request in QEMU's thread: translate gluster's byte-count
 * result into 0 / -errno, release the acb, invoke the caller's callback,
 * and flag any canceller waiting on this request.
 */
static void qemu_gluster_complete_aio(GlusterAIOCB *acb, BDRVGlusterState *s)
{
    int status = -EIO;  /* default: partial transfer is an I/O error */
    bool *cancel_flag = acb->finished;
    BlockDriverCompletionFunc *caller_cb = acb->common.cb;
    void *caller_opaque = acb->common.opaque;

    if (!acb->ret || acb->ret == acb->size) {
        status = 0;             /* zero-length or complete transfer */
    } else if (acb->ret < 0) {
        status = acb->ret;      /* error reported by gluster */
    }

    /* Release before the callback; we copied everything we still need. */
    qemu_aio_release(acb);
    caller_cb(caller_opaque, status);
    if (cancel_flag) {
        *cancel_flag = true;
    }
}
255 |
|
256 |
/*
 * fd handler for the read end of the completion pipe (non-blocking).
 * Gluster threads write raw GlusterAIOCB pointers into the pipe; reads
 * may deliver the pointer in pieces, so bytes are accumulated at
 * event_reader_pos until a whole pointer has arrived, then the request
 * is completed. Loops only to retry reads interrupted by EINTR.
 */
static void qemu_gluster_aio_event_reader(void *opaque)
{
    BDRVGlusterState *s = opaque;
    ssize_t ret;

    do {
        char *p = (char *)&s->event_acb;

        ret = read(s->fds[GLUSTER_FD_READ], p + s->event_reader_pos,
                   sizeof(s->event_acb) - s->event_reader_pos);
        if (ret > 0) {
            s->event_reader_pos += ret;
            if (s->event_reader_pos == sizeof(s->event_acb)) {
                /* full pointer assembled; reset for the next one */
                s->event_reader_pos = 0;
                qemu_gluster_complete_aio(s->event_acb, s);
            }
        }
    } while (ret < 0 && errno == EINTR);
}
275 |
|
276 |
/* TODO Convert to fine grained options */
/* Runtime options accepted by qemu_gluster_open(); currently only the
 * full gluster URI via "filename". */
static QemuOptsList runtime_opts = {
    .name = "gluster",
    .head = QTAILQ_HEAD_INITIALIZER(runtime_opts.head),
    .desc = {
        {
            .name = "filename",
            .type = QEMU_OPT_STRING,
            .help = "URL to the gluster image",
        },
        { /* end of list */ }
    },
};
289 |
|
290 |
/*
 * Open a gluster-backed image: connect to the volume named in the
 * "filename" option, open the image with flags derived from bdrv_flags,
 * and set up the completion pipe + fd handler used by the AIO path.
 * Returns 0 on success or a negative errno; on failure all partially
 * acquired resources are released.
 */
static int qemu_gluster_open(BlockDriverState *bs, QDict *options,
                             int bdrv_flags)
{
    BDRVGlusterState *s = bs->opaque;
    int open_flags = O_BINARY;
    int ret = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));
    QemuOpts *opts;
    Error *local_err = NULL;
    const char *filename;

    opts = qemu_opts_create_nofail(&runtime_opts);
    qemu_opts_absorb_qdict(opts, options, &local_err);
    if (error_is_set(&local_err)) {
        qerror_report_err(local_err);
        error_free(local_err);
        ret = -EINVAL;
        goto out;
    }

    filename = qemu_opt_get(opts, "filename");


    /* qemu_gluster_init() sets errno on failure */
    s->glfs = qemu_gluster_init(gconf, filename);
    if (!s->glfs) {
        ret = -errno;
        goto out;
    }

    if (bdrv_flags & BDRV_O_RDWR) {
        open_flags |= O_RDWR;
    } else {
        open_flags |= O_RDONLY;
    }

    if ((bdrv_flags & BDRV_O_NOCACHE)) {
        open_flags |= O_DIRECT;
    }

    s->fd = glfs_open(s->glfs, gconf->image, open_flags);
    if (!s->fd) {
        ret = -errno;
        goto out;
    }

    /* pipe through which gluster threads notify I/O completion */
    ret = qemu_pipe(s->fds);
    if (ret < 0) {
        ret = -errno;
        goto out;
    }
    fcntl(s->fds[GLUSTER_FD_READ], F_SETFL, O_NONBLOCK);
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ],
        qemu_gluster_aio_event_reader, NULL, s);

out:
    qemu_opts_del(opts);
    qemu_gluster_gconf_free(gconf);
    if (!ret) {
        return ret;
    }
    /* error: tear down whatever was acquired before the failure */
    if (s->fd) {
        glfs_close(s->fd);
    }
    if (s->glfs) {
        glfs_fini(s->glfs);
    }
    return ret;
}
358 |
|
359 |
/*
 * Create a new image on a gluster volume: connect, create the file
 * owner-read/write, and truncate it to the requested size.
 * Returns 0 on success or a negative errno.
 * NOTE(review): size is converted to whole sectors before truncation,
 * so a non-sector-multiple request is silently rounded down.
 */
static int qemu_gluster_create(const char *filename,
        QEMUOptionParameter *options)
{
    struct glfs *glfs;
    struct glfs_fd *fd;
    int ret = 0;
    int64_t total_size = 0;
    GlusterConf *gconf = g_malloc0(sizeof(GlusterConf));

    glfs = qemu_gluster_init(gconf, filename);
    if (!glfs) {
        ret = -errno;
        goto out;
    }

    /* Read the requested virtual size (in sectors) from create options. */
    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            total_size = options->value.n / BDRV_SECTOR_SIZE;
        }
        options++;
    }

    fd = glfs_creat(glfs, gconf->image,
        O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR | S_IWUSR);
    if (!fd) {
        ret = -errno;
    } else {
        if (glfs_ftruncate(fd, total_size * BDRV_SECTOR_SIZE) != 0) {
            ret = -errno;
        }
        /* close even after a truncate failure; a close error wins */
        if (glfs_close(fd) != 0) {
            ret = -errno;
        }
    }
out:
    qemu_gluster_gconf_free(gconf);
    if (glfs) {
        glfs_fini(glfs);
    }
    return ret;
}
400 |
|
401 |
/*
 * Cancel an in-flight request by waiting for it to complete: point the
 * acb's finished flag at a local and pump the AIO event loop until the
 * completion path sets it.
 */
static void qemu_gluster_aio_cancel(BlockDriverAIOCB *blockacb)
{
    GlusterAIOCB *acb = (GlusterAIOCB *)blockacb;
    bool done = false;

    acb->finished = &done;
    do {
        qemu_aio_wait();
    } while (!done);
}
411 |
|
412 |
/* AIOCB allocation/cancellation descriptor for this driver's requests. */
static const AIOCBInfo gluster_aiocb_info = {
    .aiocb_size = sizeof(GlusterAIOCB),
    .cancel = qemu_gluster_aio_cancel,
};
416 |
|
417 |
static void gluster_finish_aiocb(struct glfs_fd *fd, ssize_t ret, void *arg) |
418 |
{ |
419 |
GlusterAIOCB *acb = (GlusterAIOCB *)arg; |
420 |
BlockDriverState *bs = acb->common.bs; |
421 |
BDRVGlusterState *s = bs->opaque; |
422 |
int retval;
|
423 |
|
424 |
acb->ret = ret; |
425 |
retval = qemu_write_full(s->fds[GLUSTER_FD_WRITE], &acb, sizeof(acb));
|
426 |
if (retval != sizeof(acb)) { |
427 |
/*
|
428 |
* Gluster AIO callback thread failed to notify the waiting
|
429 |
* QEMU thread about IO completion.
|
430 |
*
|
431 |
* Complete this IO request and make the disk inaccessible for
|
432 |
* subsequent reads and writes.
|
433 |
*/
|
434 |
error_report("Gluster failed to notify QEMU about IO completion");
|
435 |
|
436 |
qemu_mutex_lock_iothread(); /* We are in gluster thread context */
|
437 |
acb->common.cb(acb->common.opaque, -EIO); |
438 |
qemu_aio_release(acb); |
439 |
close(s->fds[GLUSTER_FD_READ]); |
440 |
close(s->fds[GLUSTER_FD_WRITE]); |
441 |
qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL); |
442 |
bs->drv = NULL; /* Make the disk inaccessible */ |
443 |
qemu_mutex_unlock_iothread(); |
444 |
} |
445 |
} |
446 |
|
447 |
/*
 * Common async read/write submission path. Converts sector units to
 * bytes, allocates an acb, and hands the request to gluster with
 * gluster_finish_aiocb as the completion callback.
 * Returns the acb, or NULL if submission failed.
 */
static BlockDriverAIOCB *qemu_gluster_aio_rw(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque, int write)
{
    int ret;
    GlusterAIOCB *acb;
    BDRVGlusterState *s = bs->opaque;
    size_t size;
    off_t offset;

    offset = sector_num * BDRV_SECTOR_SIZE;
    size = nb_sectors * BDRV_SECTOR_SIZE;

    acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque);
    acb->size = size;       /* expected transfer length, checked at completion */
    acb->ret = 0;
    acb->finished = NULL;

    if (write) {
        ret = glfs_pwritev_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    } else {
        ret = glfs_preadv_async(s->fd, qiov->iov, qiov->niov, offset, 0,
            &gluster_finish_aiocb, acb);
    }

    if (ret < 0) {
        goto out;
    }
    return &acb->common;

out:
    qemu_aio_release(acb);
    return NULL;
}
482 |
|
483 |
/*
 * Resize the image file on the gluster volume.
 * Returns 0 on success, -errno on failure.
 */
static int qemu_gluster_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVGlusterState *s = bs->opaque;

    return glfs_ftruncate(s->fd, offset) < 0 ? -errno : 0;
}
495 |
|
496 |
/* Async read: thin wrapper over qemu_gluster_aio_rw() with write=0. */
static BlockDriverAIOCB *qemu_gluster_aio_readv(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}
502 |
|
503 |
/* Async write: thin wrapper over qemu_gluster_aio_rw() with write=1. */
static BlockDriverAIOCB *qemu_gluster_aio_writev(BlockDriverState *bs,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockDriverCompletionFunc *cb, void *opaque)
{
    return qemu_gluster_aio_rw(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
}
509 |
|
510 |
static BlockDriverAIOCB *qemu_gluster_aio_flush(BlockDriverState *bs,
|
511 |
BlockDriverCompletionFunc *cb, void *opaque)
|
512 |
{ |
513 |
int ret;
|
514 |
GlusterAIOCB *acb; |
515 |
BDRVGlusterState *s = bs->opaque; |
516 |
|
517 |
acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque); |
518 |
acb->size = 0;
|
519 |
acb->ret = 0;
|
520 |
acb->finished = NULL;
|
521 |
|
522 |
ret = glfs_fsync_async(s->fd, &gluster_finish_aiocb, acb); |
523 |
if (ret < 0) { |
524 |
goto out;
|
525 |
} |
526 |
return &acb->common;
|
527 |
|
528 |
out:
|
529 |
qemu_aio_release(acb); |
530 |
return NULL; |
531 |
} |
532 |
|
533 |
#ifdef CONFIG_GLUSTERFS_DISCARD
|
534 |
static BlockDriverAIOCB *qemu_gluster_aio_discard(BlockDriverState *bs,
|
535 |
int64_t sector_num, int nb_sectors, BlockDriverCompletionFunc *cb,
|
536 |
void *opaque)
|
537 |
{ |
538 |
int ret;
|
539 |
GlusterAIOCB *acb; |
540 |
BDRVGlusterState *s = bs->opaque; |
541 |
size_t size; |
542 |
off_t offset; |
543 |
|
544 |
offset = sector_num * BDRV_SECTOR_SIZE; |
545 |
size = nb_sectors * BDRV_SECTOR_SIZE; |
546 |
|
547 |
acb = qemu_aio_get(&gluster_aiocb_info, bs, cb, opaque); |
548 |
acb->size = 0;
|
549 |
acb->ret = 0;
|
550 |
acb->finished = NULL;
|
551 |
|
552 |
ret = glfs_discard_async(s->fd, offset, size, &gluster_finish_aiocb, acb); |
553 |
if (ret < 0) { |
554 |
goto out;
|
555 |
} |
556 |
return &acb->common;
|
557 |
|
558 |
out:
|
559 |
qemu_aio_release(acb); |
560 |
return NULL; |
561 |
} |
562 |
#endif
|
563 |
|
564 |
/*
 * Report the image size in bytes by seeking to the end of the file.
 * Returns the size, or -errno on failure.
 */
static int64_t qemu_gluster_getlength(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;
    int64_t end = glfs_lseek(s->fd, 0, SEEK_END);

    return end < 0 ? -errno : end;
}
576 |
|
577 |
/*
 * Report the actually allocated on-disk size of the image.
 * Returns bytes allocated, or -errno on failure.
 */
static int64_t qemu_gluster_allocated_file_size(BlockDriverState *bs)
{
    struct stat st;
    BDRVGlusterState *s = bs->opaque;

    if (glfs_fstat(s->fd, &st) < 0) {
        return -errno;
    }
    /* st_blocks is in 512-byte units regardless of the fs block size */
    return st.st_blocks * 512;
}
590 |
|
591 |
/*
 * Tear down the driver state: remove the completion-pipe handler, close
 * the pipe, close the image handle and drop the gluster connection.
 */
static void qemu_gluster_close(BlockDriverState *bs)
{
    BDRVGlusterState *s = bs->opaque;

    /*
     * Unregister the fd handler BEFORE closing the fd: unregistering
     * afterwards operates on a dead descriptor number that may already
     * have been recycled.
     */
    qemu_aio_set_fd_handler(s->fds[GLUSTER_FD_READ], NULL, NULL, NULL);
    close(s->fds[GLUSTER_FD_READ]);
    close(s->fds[GLUSTER_FD_WRITE]);

    if (s->fd) {
        glfs_close(s->fd);
        s->fd = NULL;
    }
    glfs_fini(s->glfs);
}
605 |
|
606 |
/*
 * Whether a freshly created image reads back as zeroes. Always reports
 * no: the GlusterFS volume could be backed by a block device, whose
 * initial contents are undefined.
 */
static int qemu_gluster_has_zero_init(BlockDriverState *bs)
{
    /* GlusterFS volume could be backed by a block device */
    return 0;
}
611 |
|
612 |
/* Options accepted by qemu_gluster_create(); only the virtual size. */
static QEMUOptionParameter qemu_gluster_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size"
    },
    { NULL }
};
620 |
|
621 |
/* Driver table for the bare "gluster" protocol (transport defaults to tcp). */
static BlockDriver bdrv_gluster = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_aio_discard             = qemu_gluster_aio_discard,
#endif
    .create_options               = qemu_gluster_create_options,
};
640 |
|
641 |
/* Driver table for the explicit "gluster+tcp" protocol. */
static BlockDriver bdrv_gluster_tcp = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+tcp",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_aio_discard             = qemu_gluster_aio_discard,
#endif
    .create_options               = qemu_gluster_create_options,
};
660 |
|
661 |
/* Driver table for the "gluster+unix" protocol (unix-domain socket). */
static BlockDriver bdrv_gluster_unix = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+unix",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_aio_discard             = qemu_gluster_aio_discard,
#endif
    .create_options               = qemu_gluster_create_options,
};
680 |
|
681 |
/* Driver table for the "gluster+rdma" protocol. */
static BlockDriver bdrv_gluster_rdma = {
    .format_name                  = "gluster",
    .protocol_name                = "gluster+rdma",
    .instance_size                = sizeof(BDRVGlusterState),
    .bdrv_file_open               = qemu_gluster_open,
    .bdrv_close                   = qemu_gluster_close,
    .bdrv_create                  = qemu_gluster_create,
    .bdrv_getlength               = qemu_gluster_getlength,
    .bdrv_get_allocated_file_size = qemu_gluster_allocated_file_size,
    .bdrv_truncate                = qemu_gluster_truncate,
    .bdrv_aio_readv               = qemu_gluster_aio_readv,
    .bdrv_aio_writev              = qemu_gluster_aio_writev,
    .bdrv_aio_flush               = qemu_gluster_aio_flush,
    .bdrv_has_zero_init           = qemu_gluster_has_zero_init,
#ifdef CONFIG_GLUSTERFS_DISCARD
    .bdrv_aio_discard             = qemu_gluster_aio_discard,
#endif
    .create_options               = qemu_gluster_create_options,
};
700 |
|
701 |
static void bdrv_gluster_init(void) |
702 |
{ |
703 |
bdrv_register(&bdrv_gluster_rdma); |
704 |
bdrv_register(&bdrv_gluster_unix); |
705 |
bdrv_register(&bdrv_gluster_tcp); |
706 |
bdrv_register(&bdrv_gluster); |
707 |
} |
708 |
|
709 |
block_init(bdrv_gluster_init); |