
hw/block/xen_disk.c @ ddf5636d


/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include "hw/hw.h"
#include "hw/xen/xen_backend.h"
#include "xen_blkif.h"
#include "sysemu/blockdev.h"

/* ------------------------------------------------------------- */

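/*
 * batch_maps is enabled in blk_alloc() when we are not running in
 * emulation mode; it makes ioreq_map()/ioreq_unmap() use a single
 * multi-ref grant-table call per request instead of one call per
 * segment.  max_requests bounds how many ioreq structs ioreq_start()
 * will allocate per device.
 */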
static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

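/*
 * A PersistentGrant keeps one guest page mapped for the lifetime of the
 * connection.  Grants are promoted to this set in ioreq_map(), looked up
 * by grant reference in blkdev->persistent_gnts, and unmapped again by
 * destroy_grant() when the tree is destroyed in blk_free().
 */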
struct PersistentGrant {
    void *page;
    struct XenBlkDev *blkdev;
};

typedef struct PersistentGrant PersistentGrant;

struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;
    uint8_t             mapped;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;
    int                 num_unmap;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    bool                directiosafe;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* Persistent grants extension */
    gboolean            feature_persistent;
    GTree               *persistent_gnts;
    unsigned int        persistent_gnt_count;
    unsigned int        max_grants;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

static void ioreq_reset(struct ioreq *ioreq)
{
    memset(&ioreq->req, 0, sizeof(ioreq->req));
    ioreq->status = 0;
    ioreq->start = 0;
    ioreq->presync = 0;
    ioreq->postsync = 0;
    ioreq->mapped = 0;

    memset(ioreq->domids, 0, sizeof(ioreq->domids));
    memset(ioreq->refs, 0, sizeof(ioreq->refs));
    ioreq->prot = 0;
    memset(ioreq->page, 0, sizeof(ioreq->page));
    ioreq->pages = NULL;

    ioreq->aio_inflight = 0;
    ioreq->aio_errors = 0;

    ioreq->blkdev = NULL;
    memset(&ioreq->list, 0, sizeof(ioreq->list));
    memset(&ioreq->acct, 0, sizeof(ioreq->acct));

    qemu_iovec_reset(&ioreq->v);
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    uint ua = GPOINTER_TO_UINT(a);
    uint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static void destroy_grant(gpointer pgnt)
{
    PersistentGrant *grant = pgnt;
    XenGnttab gnt = grant->blkdev->xendev.gnttabdev;

    if (xc_gnttab_munmap(gnt, grant->page, 1) != 0) {
        xen_be_printf(&grant->blkdev->xendev, 0,
                      "xc_gnttab_munmap failed: %s\n",
                      strerror(errno));
    }
    grant->blkdev->persistent_gnt_count--;
    xen_be_printf(&grant->blkdev->xendev, 3,
                  "unmapped grant %p\n", grant->page);
    g_free(grant);
}

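/*
 * ioreq life cycle: ioreq_start() takes a request from the freelist (or
 * allocates a new one) and puts it on the inflight list, ioreq_finish()
 * moves it to the finished list once all aio has completed, and
 * ioreq_release() resets it and returns it to the freelist.
 */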
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

static void ioreq_release(struct ioreq *ioreq, bool finish)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    ioreq_reset(ioreq);
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    if (finish) {
        blkdev->requests_finished--;
    } else {
        blkdev->requests_inflight--;
    }
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_FLUSH_DISKCACHE:
        ioreq->presync = 1;
        if (!ioreq->req.nr_segments) {
            return 0;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    };

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

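/*
 * Undo the grant mappings of a request.  Only the first num_unmap pages
 * are unmapped here; pages that ioreq_map() promoted to the persistent
 * set are excluded from num_unmap and stay mapped.
 */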
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->num_unmap == 0 || ioreq->mapped == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->num_unmap) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->num_unmap;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->num_unmap; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
    ioreq->mapped = 0;
}

static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    uint32_t domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int i, j, new_maps = 0;
    PersistentGrant *grant;
    /* domids and refs variables will contain the information necessary
     * to map the grants that are needed to fulfill this request.
     *
     * After mapping the needed grants, the page array will contain the
     * memory address of each granted page in the order specified in ioreq
     * (disregarding if it's a persistent grant or not).
     */

    if (ioreq->v.niov == 0 || ioreq->mapped == 1) {
        return 0;
    }
    if (ioreq->blkdev->feature_persistent) {
        for (i = 0; i < ioreq->v.niov; i++) {
            grant = g_tree_lookup(ioreq->blkdev->persistent_gnts,
                                    GUINT_TO_POINTER(ioreq->refs[i]));

            if (grant != NULL) {
                page[i] = grant->page;
                xen_be_printf(&ioreq->blkdev->xendev, 3,
                              "using persistent-grant %" PRIu32 "\n",
                              ioreq->refs[i]);
            } else {
                    /* Add the grant to the list of grants that
                     * should be mapped
                     */
                    domids[new_maps] = ioreq->domids[i];
                    refs[new_maps] = ioreq->refs[i];
                    page[i] = NULL;
                    new_maps++;
            }
        }
        /* Set the protection to RW, since grants may be reused later
         * with a different protection than the one needed for this request
         */
        ioreq->prot = PROT_WRITE | PROT_READ;
    } else {
        /* All grants in the request should be mapped */
        memcpy(refs, ioreq->refs, sizeof(refs));
        memcpy(domids, ioreq->domids, sizeof(domids));
        memset(page, 0, sizeof(page));
        new_maps = ioreq->v.niov;
    }

    if (batch_maps && new_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, new_maps, domids, refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          new_maps, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->pages + (j++) * XC_PAGE_SIZE;
            }
        }
        ioreq->blkdev->cnt_map += new_maps;
    } else if (new_maps)  {
        for (i = 0; i < new_maps; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, domids[i], refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq->mapped = 1;
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->blkdev->cnt_map++;
        }
        for (i = 0, j = 0; i < ioreq->v.niov; i++) {
            if (page[i] == NULL) {
                page[i] = ioreq->page[j++];
            }
        }
    }
    if (ioreq->blkdev->feature_persistent) {
        while ((ioreq->blkdev->persistent_gnt_count < ioreq->blkdev->max_grants)
              && new_maps) {
            /* Go through the list of newly mapped grants and add as many
             * as possible to the list of persistently mapped grants.
             *
             * Since we start at the end of ioreq->page(s), we only need
             * to decrease new_maps to prevent these granted pages from
             * being unmapped in ioreq_unmap.
             */
            grant = g_malloc0(sizeof(*grant));
            new_maps--;
            if (batch_maps) {
                grant->page = ioreq->pages + (new_maps) * XC_PAGE_SIZE;
            } else {
                grant->page = ioreq->page[new_maps];
            }
            grant->blkdev = ioreq->blkdev;
            xen_be_printf(&ioreq->blkdev->xendev, 3,
                          "adding grant %" PRIu32 " page: %p\n",
                          refs[new_maps], grant->page);
            g_tree_insert(ioreq->blkdev->persistent_gnts,
                          GUINT_TO_POINTER(refs[new_maps]),
                          grant);
            ioreq->blkdev->persistent_gnt_count++;
        }
    }
    for (i = 0; i < ioreq->v.niov; i++) {
        ioreq->v.iov[i].iov_base += (uintptr_t)page[i];
    }
    ioreq->mapped = 1;
    ioreq->num_unmap = new_maps;
    return 0;
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq);

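/*
 * Completion callback shared by all aio issued for a request.  A presync
 * flush completion re-enters ioreq_runio_qemu_aio() to issue the actual
 * I/O; the response is only produced once aio_inflight drops to zero and
 * an eventual postsync flush has completed.
 */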
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->presync) {
        ioreq->presync = 0;
        ioreq_runio_qemu_aio(ioreq);
        return;
    }
    if (ioreq->aio_inflight > 0) {
        return;
    }
    if (ioreq->postsync) {
        ioreq->postsync = 0;
        ioreq->aio_inflight++;
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_aio_flush(ioreq->blkdev->bs, qemu_aio_complete, ioreq);
        return 0;
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_FLUSH_DISKCACHE:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq, true);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

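/*
 * Bottom-half work: answer everything on the finished list first (so the
 * slots can be reused), then pull new requests off the shared ring until
 * it is empty or the ioreq pool is exhausted; if requests had to be left
 * on the ring, more_work is set so the work is retried.
 */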
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq, false);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

/*
 * We need to account for the grant allocations requiring contiguous
 * chunks; the worst case number would be
 *     max_req * max_seg + (max_req - 1) * (max_seg - 1) + 1,
 * but in order to keep things simple just use
 *     2 * max_req * max_seg.
 */
#define MAX_GRANTS(max_req, max_seg) (2 * (max_req) * (max_seg))
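/*
 * With the defaults used here (max_requests = 32 and, in the blkif ABI,
 * BLKIF_MAX_SEGMENTS_PER_REQUEST = 11) this reserves 2 * 32 * 11 = 704
 * grant slots.
 */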

static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
    if (xc_gnttab_set_max_grants(xendev->gnttabdev,
            MAX_GRANTS(max_requests, BLKIF_MAX_SEGMENTS_PER_REQUEST)) < 0) {
        xen_be_printf(xendev, 0, "xc_gnttab_set_max_grants failed: %s\n",
                      strerror(errno));
    }
}

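/*
 * First xenstore stage: read the backend configuration ("params", "mode",
 * "type", "dev", ...) and advertise the features this backend supports.
 * The disk geometry is published later, in blk_connect(), once the image
 * has been opened.
 */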
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int info = 0;
    char *directiosafe = NULL;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }
    directiosafe = xenstore_read_be_str(&blkdev->xendev, "direct-io-safe");
    blkdev->directiosafe = (directiosafe && atoi(directiosafe));

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w")) {
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    blkdev->file_blk  = BLOCK_SIZE;

    /* fill info
     * blk_connect supplies sector-size and sectors
     */
    xenstore_write_be_int(&blkdev->xendev, "feature-flush-cache", 1);
    xenstore_write_be_int(&blkdev->xendev, "feature-persistent", 1);
    xenstore_write_be_int(&blkdev->xendev, "info", info);

    g_free(directiosafe);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    g_free(directiosafe);
    blkdev->directiosafe = false;
    return -1;
}

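/*
 * Second stage, entered once the frontend is ready: open (or adopt) the
 * block device, publish sector-size and sectors, map the shared ring and
 * bind the event channel.
 */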
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int pers, index, qflags;
    bool readonly = true;

    /* open flags: caching and read-only mode */
    if (blkdev->directiosafe) {
        qflags = BDRV_O_NOCACHE | BDRV_O_NATIVE_AIO;
    } else {
        qflags = BDRV_O_CACHE_WB;
    }
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags |= BDRV_O_RDWR;
        readonly = false;
    }

    /* init qemu block driver */
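    /*
     * For xvd-style device numbers the xenbus "dev" value is
     * 202 * 256 + 16 * disk + partition (major 202, 16 minors per disk),
     * so this recovers the disk index used with -drive if=xen.
     */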
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            Error *local_err = NULL;
            BlockDriver *drv = bdrv_find_whitelisted_format(blkdev->fileproto,
                                                           readonly);
            if (bdrv_open(&blkdev->bs, blkdev->filename, NULL, NULL, qflags,
                          drv, &local_err) != 0)
            {
                xen_be_printf(&blkdev->xendev, 0, "error: %s\n",
                              error_get_pretty(local_err));
                error_free(local_err);
                bdrv_unref(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            return -1;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
        if (bdrv_is_read_only(blkdev->bs) && !readonly) {
            xen_be_printf(&blkdev->xendev, 0, "Unexpected read-only drive");
            blkdev->bs = NULL;
            return -1;
        }
        /* blkdev->bs was not created by us; take a reference
         * so we can bdrv_unref() it unconditionally */
        bdrv_ref(blkdev->bs);
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      bdrv_get_format_name(blkdev->bs) ?: "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* Fill in the sector size and number of sectors */
    xenstore_write_be_int(&blkdev->xendev, "sector-size", blkdev->file_blk);
858
    xenstore_write_be_int64(&blkdev->xendev, "sectors",
859
                            blkdev->file_size / blkdev->file_blk);
860

    
861
    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
862
        return -1;
863
    }
864
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
865
                             &blkdev->xendev.remote_port) == -1) {
866
        return -1;
867
    }
868
    if (xenstore_read_fe_int(&blkdev->xendev, "feature-persistent", &pers)) {
869
        blkdev->feature_persistent = FALSE;
870
    } else {
871
        blkdev->feature_persistent = !!pers;
872
    }
873

    
874
    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
875
    if (blkdev->xendev.protocol) {
876
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
877
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
878
        }
879
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
880
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
881
        }
882
    }
883

    
884
    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
885
                                            blkdev->xendev.dom,
886
                                            blkdev->ring_ref,
887
                                            PROT_READ | PROT_WRITE);
888
    if (!blkdev->sring) {
889
        return -1;
890
    }
891
    blkdev->cnt_map++;
892

    
893
    switch (blkdev->protocol) {
894
    case BLKIF_PROTOCOL_NATIVE:
895
    {
896
        blkif_sring_t *sring_native = blkdev->sring;
897
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
898
        break;
899
    }
900
    case BLKIF_PROTOCOL_X86_32:
901
    {
902
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;
903

    
904
        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
905
        break;
906
    }
907
    case BLKIF_PROTOCOL_X86_64:
908
    {
909
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;
910

    
911
        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
912
        break;
913
    }
914
    }
915

    
916
    if (blkdev->feature_persistent) {
917
        /* Init persistent grants */
918
        blkdev->max_grants = max_requests * BLKIF_MAX_SEGMENTS_PER_REQUEST;
919
        blkdev->persistent_gnts = g_tree_new_full((GCompareDataFunc)int_cmp,
920
                                             NULL, NULL,
921
                                             (GDestroyNotify)destroy_grant);
922
        blkdev->persistent_gnt_count = 0;
923
    }
924

    
925
    xen_be_bind_evtchn(&blkdev->xendev);
926

    
927
    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
928
                  "remote port %d, local port %d\n",
929
                  blkdev->xendev.protocol, blkdev->ring_ref,
930
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
931
    return 0;
932
}
933

    
934
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        bdrv_detach_dev(blkdev->bs, blkdev);
        bdrv_unref(blkdev->bs);
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    if (blkdev->bs || blkdev->sring) {
        blk_disconnect(xendev);
    }

    /* Free persistent grants */
    if (blkdev->feature_persistent) {
        g_tree_destroy(blkdev->persistent_gnts);
    }

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

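/*
 * Hooks invoked by the generic Xen backend code (hw/xen/xen_backend.c) as
 * the device moves through its xenbus states: blk_alloc() on backend
 * creation, blk_init() to read the configuration, blk_connect() when the
 * frontend comes up, blk_disconnect()/blk_free() on teardown, and
 * blk_event() for event-channel notifications.
 */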
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise    = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};