hw/xen_disk.c @ bc4caf49

/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

static int syncwrite    = 0;
static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

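/*
 * One outstanding block request: the copy of the ring request, the
 * parsed offset + iovec, the grant mappings backing the guest's
 * segments, and the aio bookkeeping needed to know when we are done.
 */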
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

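/*
 * Per-device backend state: configuration read from xenstore, the
 * mapped shared ring, the three request lists, and the qemu block
 * driver instance that backs the disk.
 */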
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

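/*
 * Grab a free ioreq, allocating a new one as long as we are below
 * max_requests, and move it onto the inflight list.  Returns NULL
 * when the cap is reached; the caller retries via the bottom half.
 */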
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

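/* Move a completed request from the inflight to the finished list. */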
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

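/* Wipe a finished request and put it back on the freelist for reuse. */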
static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    /* release and re-init the iovec around the memset: zeroing it in
     * place would leak the array allocated by qemu_iovec_init() */
    qemu_iovec_destroy(&ioreq->v);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        if (!syncwrite) {
            ioreq->presync = ioreq->postsync = 1;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (syncwrite) {
            ioreq->postsync = 1;
        }
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        /* iov_base holds only the in-page offset for now; ioreq_map()
         * adds the address of the mapped grant page later */
        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

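/*
 * Drop the grant mappings of a request: one munmap call in batch
 * mode, one call per page otherwise.
 */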
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

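/*
 * Map the granted guest pages and turn the iovec entries, which so
 * far only carry the offset within their page (set by ioreq_parse),
 * into real pointers into the mappings.
 */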
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}

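/*
 * Completion callback: runs once per submitted aio operation plus
 * once for the submitter's own reference.  Whoever drops the last
 * reference handles postsync, unmaps the grants, and schedules the
 * bottom half so the response gets sent.
 */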
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        bdrv_flush(ioreq->blkdev->bs);
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

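/*
 * Map the request's grants and submit the actual I/O through the
 * qemu block layer.  The aio_inflight reference taken right below
 * is dropped again at the end of the function, see the comment there.
 */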
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    /* drop the reference taken above; once the submitted aio ops have
     * completed as well, qemu_aio_complete() sends the response */
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

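/*
 * Put one response on the shared ring and advance rsp_prod_pvt.
 * Returns nonzero if the frontend must be notified; also flags
 * more_work when further requests are already waiting on the ring.
 */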
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        /* unreachable (blk_connect only sets the three protocols
         * above); bail out rather than memcpy through a NULL dst */
        return 0;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

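/*
 * Copy one request off the shared ring, converting from the 32-bit
 * or 64-bit x86 ABI layout if the frontend's ABI differs from ours.
 */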
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

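/*
 * Main work loop: first flush out finished responses, then consume
 * new requests up to the producer index, parsing and submitting each
 * one.  Reschedules itself via the bottom half if it ran out of free
 * ioreqs or more requests arrived meanwhile.
 */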
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

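/*
 * Called when the backend core creates the device: set up the request
 * lists and the bottom half.  Batched grant mapping is used except
 * under emulation (xen_mode == XEN_EMULATE).
 */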
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}

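/*
 * Initialisation while the frontend is still connecting: read the
 * backend configuration from xenstore, open (or adopt) the block
 * device, and publish disk geometry and features back to xenstore.
 */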
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    qflags = BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NATIVE_AIO;
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
    } else {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    /* init qemu block driver */
    /* xvd* disks use major 202 with 16 minors per disk */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                        bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info",            info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size",     blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

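/*
 * The frontend is ready: fetch ring-ref and event-channel from the
 * frontend's xenstore area, map the shared ring, initialise it for
 * the negotiated ABI, and bind the event channel.
 */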
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

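/*
 * Tear down the connection: close the block device if we opened it
 * ourselves in blk_init, unbind the event channel, and unmap the
 * shared ring.
 */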
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_detach_dev(blkdev->bs, blkdev);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

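/*
 * Final teardown: disconnect if still connected, free the cached
 * ioreqs and the xenstore-derived strings, delete the bottom half.
 */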
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

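/* Event-channel kick from the frontend: defer to the bottom half. */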
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

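/*
 * Backend callbacks, driven by the generic xen backend core;
 * DEVOPS_FLAG_NEED_GNTDEV asks the core to open a grant-table
 * handle (xendev.gnttabdev) for this device.
 */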
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};
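
/*
 * Note: this ops table is registered with the backend core elsewhere
 * in the tree -- typically a call along the lines of
 * xen_be_register("qdisk", &xen_blkdev_ops) during machine setup,
 * "qdisk" being the xenstore backend type the core watches for.
 */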