root / hw / xen_disk.c @ 1f51470d

/*
 *  xen paravirt block device backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <time.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>

#include "hw.h"
#include "block_int.h"
#include "qemu-char.h"
#include "xen_blkif.h"
#include "xen_backend.h"
#include "blockdev.h"

/* ------------------------------------------------------------- */

static int syncwrite    = 0;
static int batch_maps   = 0;

static int max_requests = 32;

/* ------------------------------------------------------------- */

#define BLOCK_SIZE  512
#define IOCB_COUNT  (BLKIF_MAX_SEGMENTS_PER_REQUEST + 2)

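/*
 * per-request state: the guest's blkif request, the parsed offset and
 * iovec, the grant refs backing each segment, and the status of the
 * asynchronous I/O issued for it
 */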
struct ioreq {
    blkif_request_t     req;
    int16_t             status;

    /* parsed request */
    off_t               start;
    QEMUIOVector        v;
    int                 presync;
    int                 postsync;

    /* grant mapping */
    uint32_t            domids[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    uint32_t            refs[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    int                 prot;
    void                *page[BLKIF_MAX_SEGMENTS_PER_REQUEST];
    void                *pages;

    /* aio status */
    int                 aio_inflight;
    int                 aio_errors;

    struct XenBlkDev    *blkdev;
    QLIST_ENTRY(ioreq)   list;
    BlockAcctCookie     acct;
};

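/*
 * per-device state: xenstore configuration, the shared ring mapped from
 * the frontend, request bookkeeping, and the backing qemu block driver
 */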
struct XenBlkDev {
    struct XenDevice    xendev;  /* must be first */
    char                *params;
    char                *mode;
    char                *type;
    char                *dev;
    char                *devtype;
    const char          *fileproto;
    const char          *filename;
    int                 ring_ref;
    void                *sring;
    int64_t             file_blk;
    int64_t             file_size;
    int                 protocol;
    blkif_back_rings_t  rings;
    int                 more_work;
    int                 cnt_map;

    /* request lists */
    QLIST_HEAD(inflight_head, ioreq) inflight;
    QLIST_HEAD(finished_head, ioreq) finished;
    QLIST_HEAD(freelist_head, ioreq) freelist;
    int                 requests_total;
    int                 requests_inflight;
    int                 requests_finished;

    /* qemu block driver */
    DriveInfo           *dinfo;
    BlockDriverState    *bs;
    QEMUBH              *bh;
};

/* ------------------------------------------------------------- */

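/*
 * grab a free ioreq from the freelist (or allocate a new one, up to
 * max_requests) and move it onto the inflight list; returns NULL when
 * the request limit has been reached
 */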
static struct ioreq *ioreq_start(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq = NULL;

    if (QLIST_EMPTY(&blkdev->freelist)) {
        if (blkdev->requests_total >= max_requests) {
            goto out;
        }
        /* allocate new struct */
        ioreq = g_malloc0(sizeof(*ioreq));
        ioreq->blkdev = blkdev;
        blkdev->requests_total++;
        qemu_iovec_init(&ioreq->v, BLKIF_MAX_SEGMENTS_PER_REQUEST);
    } else {
        /* get one from freelist */
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_reset(&ioreq->v);
    }
    QLIST_INSERT_HEAD(&blkdev->inflight, ioreq, list);
    blkdev->requests_inflight++;

out:
    return ioreq;
}

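/* move a completed ioreq from the inflight list to the finished list */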
static void ioreq_finish(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    QLIST_INSERT_HEAD(&blkdev->finished, ioreq, list);
    blkdev->requests_inflight--;
    blkdev->requests_finished++;
}

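/* recycle an ioreq into the freelist once its response has been sent */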
static void ioreq_release(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    QLIST_REMOVE(ioreq, list);
    memset(ioreq, 0, sizeof(*ioreq));
    ioreq->blkdev = blkdev;
    QLIST_INSERT_HEAD(&blkdev->freelist, ioreq, list);
    blkdev->requests_finished--;
}

/*
 * translate request into iovec + start offset
 * do sanity checks along the way
 */
static int ioreq_parse(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;
    uintptr_t mem;
    size_t len;
    int i;

    xen_be_printf(&blkdev->xendev, 3,
                  "op %d, nr %d, handle %d, id %" PRId64 ", sector %" PRId64 "\n",
                  ioreq->req.operation, ioreq->req.nr_segments,
                  ioreq->req.handle, ioreq->req.id, ioreq->req.sector_number);
    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        ioreq->prot = PROT_WRITE; /* to memory */
        break;
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            ioreq->presync = 1;
            return 0;
        }
        if (!syncwrite) {
            ioreq->presync = ioreq->postsync = 1;
        }
        /* fall through */
    case BLKIF_OP_WRITE:
        ioreq->prot = PROT_READ; /* from memory */
        if (syncwrite) {
            ioreq->postsync = 1;
        }
        break;
    default:
        xen_be_printf(&blkdev->xendev, 0, "error: unknown operation (%d)\n",
                      ioreq->req.operation);
        goto err;
    }

    if (ioreq->req.operation != BLKIF_OP_READ && blkdev->mode[0] != 'w') {
        xen_be_printf(&blkdev->xendev, 0, "error: write req for ro device\n");
        goto err;
    }

    ioreq->start = ioreq->req.sector_number * blkdev->file_blk;
    for (i = 0; i < ioreq->req.nr_segments; i++) {
        if (i == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
            xen_be_printf(&blkdev->xendev, 0, "error: nr_segments too big\n");
            goto err;
        }
        if (ioreq->req.seg[i].first_sect > ioreq->req.seg[i].last_sect) {
            xen_be_printf(&blkdev->xendev, 0, "error: first > last sector\n");
            goto err;
        }
        if (ioreq->req.seg[i].last_sect * BLOCK_SIZE >= XC_PAGE_SIZE) {
            xen_be_printf(&blkdev->xendev, 0, "error: page crossing\n");
            goto err;
        }

        ioreq->domids[i] = blkdev->xendev.dom;
        ioreq->refs[i]   = ioreq->req.seg[i].gref;

        mem = ioreq->req.seg[i].first_sect * blkdev->file_blk;
        len = (ioreq->req.seg[i].last_sect - ioreq->req.seg[i].first_sect + 1) * blkdev->file_blk;
        qemu_iovec_add(&ioreq->v, (void*)mem, len);
    }
    if (ioreq->start + ioreq->v.size > blkdev->file_size) {
        xen_be_printf(&blkdev->xendev, 0, "error: access beyond end of file\n");
        goto err;
    }
    return 0;

err:
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

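/*
 * drop the grant mappings of an ioreq, either as one batched munmap or
 * page by page, mirroring how they were mapped
 */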
static void ioreq_unmap(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return;
    }
    if (batch_maps) {
        if (!ioreq->pages) {
            return;
        }
        if (xc_gnttab_munmap(gnt, ioreq->pages, ioreq->v.niov) != 0) {
            xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                          strerror(errno));
        }
        ioreq->blkdev->cnt_map -= ioreq->v.niov;
        ioreq->pages = NULL;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            if (!ioreq->page[i]) {
                continue;
            }
            if (xc_gnttab_munmap(gnt, ioreq->page[i], 1) != 0) {
                xen_be_printf(&ioreq->blkdev->xendev, 0, "xc_gnttab_munmap failed: %s\n",
                              strerror(errno));
            }
            ioreq->blkdev->cnt_map--;
            ioreq->page[i] = NULL;
        }
    }
}

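/*
 * map the guest's grant refs into our address space -- one batched
 * hypercall if batch_maps is set, else one mapping per segment -- and
 * point the iovec entries at the mapped pages
 */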
static int ioreq_map(struct ioreq *ioreq)
{
    XenGnttab gnt = ioreq->blkdev->xendev.gnttabdev;
    int i;

    if (ioreq->v.niov == 0) {
        return 0;
    }
    if (batch_maps) {
        ioreq->pages = xc_gnttab_map_grant_refs
            (gnt, ioreq->v.niov, ioreq->domids, ioreq->refs, ioreq->prot);
        if (ioreq->pages == NULL) {
            xen_be_printf(&ioreq->blkdev->xendev, 0,
                          "can't map %d grant refs (%s, %d maps)\n",
                          ioreq->v.niov, strerror(errno), ioreq->blkdev->cnt_map);
            return -1;
        }
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->v.iov[i].iov_base = ioreq->pages + i * XC_PAGE_SIZE +
                (uintptr_t)ioreq->v.iov[i].iov_base;
        }
        ioreq->blkdev->cnt_map += ioreq->v.niov;
    } else {
        for (i = 0; i < ioreq->v.niov; i++) {
            ioreq->page[i] = xc_gnttab_map_grant_ref
                (gnt, ioreq->domids[i], ioreq->refs[i], ioreq->prot);
            if (ioreq->page[i] == NULL) {
                xen_be_printf(&ioreq->blkdev->xendev, 0,
                              "can't map grant ref %d (%s, %d maps)\n",
                              ioreq->refs[i], strerror(errno), ioreq->blkdev->cnt_map);
                ioreq_unmap(ioreq);
                return -1;
            }
            ioreq->v.iov[i].iov_base = ioreq->page[i] + (uintptr_t)ioreq->v.iov[i].iov_base;
            ioreq->blkdev->cnt_map++;
        }
    }
    return 0;
}

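/*
 * completion callback for the aio requests issued below; once the last
 * outstanding aio of this ioreq has completed, set the response status,
 * unmap the grants and kick the bottom half to send the response
 */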
static void qemu_aio_complete(void *opaque, int ret)
{
    struct ioreq *ioreq = opaque;

    if (ret != 0) {
        xen_be_printf(&ioreq->blkdev->xendev, 0, "%s I/O error\n",
                      ioreq->req.operation == BLKIF_OP_READ ? "read" : "write");
        ioreq->aio_errors++;
    }

    ioreq->aio_inflight--;
    if (ioreq->aio_inflight > 0) {
        return;
    }

    ioreq->status = ioreq->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY;
    ioreq_unmap(ioreq);
    ioreq_finish(ioreq);
    bdrv_acct_done(ioreq->blkdev->bs, &ioreq->acct);
    qemu_bh_schedule(ioreq->blkdev->bh);
}

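/*
 * submit an ioreq to the qemu block layer: map grants, account the I/O,
 * issue the asynchronous read or write.  aio_inflight starts at one so
 * the final qemu_aio_complete() call below fires the completion exactly
 * once, after all aio requests have been issued.
 */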
static int ioreq_runio_qemu_aio(struct ioreq *ioreq)
{
    struct XenBlkDev *blkdev = ioreq->blkdev;

    if (ioreq->req.nr_segments && ioreq_map(ioreq) == -1) {
        goto err_no_map;
    }

    ioreq->aio_inflight++;
    if (ioreq->presync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }

    switch (ioreq->req.operation) {
    case BLKIF_OP_READ:
        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_READ);
        ioreq->aio_inflight++;
        bdrv_aio_readv(blkdev->bs, ioreq->start / BLOCK_SIZE,
                       &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                       qemu_aio_complete, ioreq);
        break;
    case BLKIF_OP_WRITE:
    case BLKIF_OP_WRITE_BARRIER:
        if (!ioreq->req.nr_segments) {
            break;
        }

        bdrv_acct_start(blkdev->bs, &ioreq->acct, ioreq->v.size, BDRV_ACCT_WRITE);
        ioreq->aio_inflight++;
        bdrv_aio_writev(blkdev->bs, ioreq->start / BLOCK_SIZE,
                        &ioreq->v, ioreq->v.size / BLOCK_SIZE,
                        qemu_aio_complete, ioreq);
        break;
    default:
        /* unknown operation (shouldn't happen -- parse catches this) */
        goto err;
    }

    if (ioreq->postsync) {
        bdrv_flush(blkdev->bs); /* FIXME: aio_flush() ??? */
    }
    qemu_aio_complete(ioreq, 0);

    return 0;

err:
    ioreq_unmap(ioreq);
err_no_map:
    ioreq_finish(ioreq);
    ioreq->status = BLKIF_RSP_ERROR;
    return -1;
}

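/*
 * place one response on the shared ring and advance the producer;
 * returns nonzero if the frontend needs an event-channel notification
 */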
static int blk_send_response_one(struct ioreq *ioreq)
{
    struct XenBlkDev  *blkdev = ioreq->blkdev;
    int               send_notify   = 0;
    int               have_requests = 0;
    blkif_response_t  resp;
    void              *dst;

    resp.id        = ioreq->req.id;
    resp.operation = ioreq->req.operation;
    resp.status    = ioreq->status;

    /* Place on the response ring for the relevant domain. */
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_32:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
                                blkdev->rings.x86_32_part.rsp_prod_pvt);
        break;
    case BLKIF_PROTOCOL_X86_64:
        dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
                                blkdev->rings.x86_64_part.rsp_prod_pvt);
        break;
    default:
        dst = NULL;
    }
    memcpy(dst, &resp, sizeof(resp));
    blkdev->rings.common.rsp_prod_pvt++;

    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
    if (blkdev->rings.common.rsp_prod_pvt == blkdev->rings.common.req_cons) {
        /*
         * Tail check for pending requests. Allows frontend to avoid
         * notifications if requests are already in flight (lower
         * overheads and promotes batching).
         */
        RING_FINAL_CHECK_FOR_REQUESTS(&blkdev->rings.common, have_requests);
    } else if (RING_HAS_UNCONSUMED_REQUESTS(&blkdev->rings.common)) {
        have_requests = 1;
    }

    if (have_requests) {
        blkdev->more_work++;
    }
    return send_notify;
}

/* walk finished list, send outstanding responses, free requests */
static void blk_send_response_all(struct XenBlkDev *blkdev)
{
    struct ioreq *ioreq;
    int send_notify = 0;

    while (!QLIST_EMPTY(&blkdev->finished)) {
        ioreq = QLIST_FIRST(&blkdev->finished);
        send_notify += blk_send_response_one(ioreq);
        ioreq_release(ioreq);
    }
    if (send_notify) {
        xen_be_send_notify(&blkdev->xendev);
    }
}

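/*
 * copy one request off the shared ring, converting from the 32/64-bit
 * x86 layouts when the frontend ABI differs from ours
 */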
static int blk_get_request(struct XenBlkDev *blkdev, struct ioreq *ioreq, RING_IDX rc)
{
    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
        memcpy(&ioreq->req, RING_GET_REQUEST(&blkdev->rings.native, rc),
               sizeof(ioreq->req));
        break;
    case BLKIF_PROTOCOL_X86_32:
        blkif_get_x86_32_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_32_part, rc));
        break;
    case BLKIF_PROTOCOL_X86_64:
        blkif_get_x86_64_req(&ioreq->req,
                             RING_GET_REQUEST(&blkdev->rings.x86_64_part, rc));
        break;
    }
    return 0;
}

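/*
 * main request loop: flush pending responses, then pull requests off
 * the ring, parse and submit them until the ring is empty or the
 * request limit is hit; reschedules itself via the bottom half when
 * more work remains
 */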
static void blk_handle_requests(struct XenBlkDev *blkdev)
{
    RING_IDX rc, rp;
    struct ioreq *ioreq;

    blkdev->more_work = 0;

    rc = blkdev->rings.common.req_cons;
    rp = blkdev->rings.common.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    blk_send_response_all(blkdev);
    while (rc != rp) {
        /* pull request from ring */
        if (RING_REQUEST_CONS_OVERFLOW(&blkdev->rings.common, rc)) {
            break;
        }
        ioreq = ioreq_start(blkdev);
        if (ioreq == NULL) {
            blkdev->more_work++;
            break;
        }
        blk_get_request(blkdev, ioreq, rc);
        blkdev->rings.common.req_cons = ++rc;

        /* parse them */
        if (ioreq_parse(ioreq) != 0) {
            if (blk_send_response_one(ioreq)) {
                xen_be_send_notify(&blkdev->xendev);
            }
            ioreq_release(ioreq);
            continue;
        }

        ioreq_runio_qemu_aio(ioreq);
    }

    if (blkdev->more_work && blkdev->requests_inflight < max_requests) {
        qemu_bh_schedule(blkdev->bh);
    }
}

/* ------------------------------------------------------------- */

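/* bottom-half handler, runs the request loop in a safe context */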
static void blk_bh(void *opaque)
{
    struct XenBlkDev *blkdev = opaque;
    blk_handle_requests(blkdev);
}

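/*
 * first stage of device setup: init the request lists and the bottom
 * half; batched grant maps are enabled only when not running in
 * emulated xen mode
 */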
static void blk_alloc(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    QLIST_INIT(&blkdev->inflight);
    QLIST_INIT(&blkdev->finished);
    QLIST_INIT(&blkdev->freelist);
    blkdev->bh = qemu_bh_new(blk_bh, blkdev);
    if (xen_mode != XEN_EMULATE) {
        batch_maps = 1;
    }
}

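/*
 * second stage: read the backend configuration from xenstore, open (or
 * look up) the qemu block device, and publish size and features to the
 * frontend
 */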
static int blk_init(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    int index, qflags, info = 0;

    /* read xenstore entries */
    if (blkdev->params == NULL) {
        char *h = NULL;
        blkdev->params = xenstore_read_be_str(&blkdev->xendev, "params");
        if (blkdev->params != NULL) {
            h = strchr(blkdev->params, ':');
        }
        if (h != NULL) {
            blkdev->fileproto = blkdev->params;
            blkdev->filename  = h+1;
            *h = 0;
        } else {
            blkdev->fileproto = "<unset>";
            blkdev->filename  = blkdev->params;
        }
    }
    if (!strcmp("aio", blkdev->fileproto)) {
        blkdev->fileproto = "raw";
    }
    if (blkdev->mode == NULL) {
        blkdev->mode = xenstore_read_be_str(&blkdev->xendev, "mode");
    }
    if (blkdev->type == NULL) {
        blkdev->type = xenstore_read_be_str(&blkdev->xendev, "type");
    }
    if (blkdev->dev == NULL) {
        blkdev->dev = xenstore_read_be_str(&blkdev->xendev, "dev");
    }
    if (blkdev->devtype == NULL) {
        blkdev->devtype = xenstore_read_be_str(&blkdev->xendev, "device-type");
    }

    /* do we have all we need? */
    if (blkdev->params == NULL ||
        blkdev->mode == NULL   ||
        blkdev->type == NULL   ||
        blkdev->dev == NULL) {
        goto out_error;
    }

    /* read-only ? */
    if (strcmp(blkdev->mode, "w") == 0) {
        qflags = BDRV_O_RDWR;
    } else {
        qflags = 0;
        info  |= VDISK_READONLY;
    }

    /* cdrom ? */
    if (blkdev->devtype && !strcmp(blkdev->devtype, "cdrom")) {
        info  |= VDISK_CDROM;
    }

    /* init qemu block driver */
    index = (blkdev->xendev.dev - 202 * 256) / 16;
    blkdev->dinfo = drive_get(IF_XEN, 0, index);
    if (!blkdev->dinfo) {
        /* setup via xenbus -> create new block driver instance */
        xen_be_printf(&blkdev->xendev, 2, "create new bdrv (xenbus setup)\n");
        blkdev->bs = bdrv_new(blkdev->dev);
        if (blkdev->bs) {
            if (bdrv_open(blkdev->bs, blkdev->filename, qflags,
                        bdrv_find_whitelisted_format(blkdev->fileproto)) != 0) {
                bdrv_delete(blkdev->bs);
                blkdev->bs = NULL;
            }
        }
        if (!blkdev->bs) {
            goto out_error;
        }
    } else {
        /* setup via qemu cmdline -> already setup for us */
        xen_be_printf(&blkdev->xendev, 2, "get configured bdrv (cmdline setup)\n");
        blkdev->bs = blkdev->dinfo->bdrv;
    }
    bdrv_attach_dev_nofail(blkdev->bs, blkdev);
    blkdev->file_blk  = BLOCK_SIZE;
    blkdev->file_size = bdrv_getlength(blkdev->bs);
    if (blkdev->file_size < 0) {
        xen_be_printf(&blkdev->xendev, 1, "bdrv_getlength: %d (%s) | drv %s\n",
                      (int)blkdev->file_size, strerror(-blkdev->file_size),
                      blkdev->bs->drv ? blkdev->bs->drv->format_name : "-");
        blkdev->file_size = 0;
    }

    xen_be_printf(xendev, 1, "type \"%s\", fileproto \"%s\", filename \"%s\","
                  " size %" PRId64 " (%" PRId64 " MB)\n",
                  blkdev->type, blkdev->fileproto, blkdev->filename,
                  blkdev->file_size, blkdev->file_size >> 20);

    /* fill info */
    xenstore_write_be_int(&blkdev->xendev, "feature-barrier", 1);
    xenstore_write_be_int(&blkdev->xendev, "info",            info);
    xenstore_write_be_int(&blkdev->xendev, "sector-size",     blkdev->file_blk);
    xenstore_write_be_int(&blkdev->xendev, "sectors",
                          blkdev->file_size / blkdev->file_blk);
    return 0;

out_error:
    g_free(blkdev->params);
    blkdev->params = NULL;
    g_free(blkdev->mode);
    blkdev->mode = NULL;
    g_free(blkdev->type);
    blkdev->type = NULL;
    g_free(blkdev->dev);
    blkdev->dev = NULL;
    g_free(blkdev->devtype);
    blkdev->devtype = NULL;
    return -1;
}

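/*
 * final stage: map the shared ring named by the frontend, honour its
 * requested ring ABI, and bind the event channel
 */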
static int blk_connect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (xenstore_read_fe_int(&blkdev->xendev, "ring-ref", &blkdev->ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&blkdev->xendev, "event-channel",
                             &blkdev->xendev.remote_port) == -1) {
        return -1;
    }

    blkdev->protocol = BLKIF_PROTOCOL_NATIVE;
    if (blkdev->xendev.protocol) {
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_32) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_32;
        }
        if (strcmp(blkdev->xendev.protocol, XEN_IO_PROTO_ABI_X86_64) == 0) {
            blkdev->protocol = BLKIF_PROTOCOL_X86_64;
        }
    }

    blkdev->sring = xc_gnttab_map_grant_ref(blkdev->xendev.gnttabdev,
                                            blkdev->xendev.dom,
                                            blkdev->ring_ref,
                                            PROT_READ | PROT_WRITE);
    if (!blkdev->sring) {
        return -1;
    }
    blkdev->cnt_map++;

    switch (blkdev->protocol) {
    case BLKIF_PROTOCOL_NATIVE:
    {
        blkif_sring_t *sring_native = blkdev->sring;
        BACK_RING_INIT(&blkdev->rings.native, sring_native, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_32:
    {
        blkif_x86_32_sring_t *sring_x86_32 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_32_part, sring_x86_32, XC_PAGE_SIZE);
        break;
    }
    case BLKIF_PROTOCOL_X86_64:
    {
        blkif_x86_64_sring_t *sring_x86_64 = blkdev->sring;

        BACK_RING_INIT(&blkdev->rings.x86_64_part, sring_x86_64, XC_PAGE_SIZE);
        break;
    }
    }

    xen_be_bind_evtchn(&blkdev->xendev);

    xen_be_printf(&blkdev->xendev, 1, "ok: proto %s, ring-ref %d, "
                  "remote port %d, local port %d\n",
                  blkdev->xendev.protocol, blkdev->ring_ref,
                  blkdev->xendev.remote_port, blkdev->xendev.local_port);
    return 0;
}

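/*
 * tear down the connection: close the block device (only if we opened
 * it ourselves), unbind the event channel, unmap the shared ring
 */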
static void blk_disconnect(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    if (blkdev->bs) {
        if (!blkdev->dinfo) {
            /* close/delete only if we created it ourselves */
            bdrv_close(blkdev->bs);
            bdrv_delete(blkdev->bs);
        }
        blkdev->bs = NULL;
    }
    xen_be_unbind_evtchn(&blkdev->xendev);

    if (blkdev->sring) {
        xc_gnttab_munmap(blkdev->xendev.gnttabdev, blkdev->sring, 1);
        blkdev->cnt_map--;
        blkdev->sring = NULL;
    }
}

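/* release everything the device owns, including pooled ioreqs */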
static int blk_free(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);
    struct ioreq *ioreq;

    while (!QLIST_EMPTY(&blkdev->freelist)) {
        ioreq = QLIST_FIRST(&blkdev->freelist);
        QLIST_REMOVE(ioreq, list);
        qemu_iovec_destroy(&ioreq->v);
        g_free(ioreq);
    }

    g_free(blkdev->params);
    g_free(blkdev->mode);
    g_free(blkdev->type);
    g_free(blkdev->dev);
    g_free(blkdev->devtype);
    qemu_bh_delete(blkdev->bh);
    return 0;
}

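/* event-channel notification from the frontend: defer to the bottom half */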
static void blk_event(struct XenDevice *xendev)
{
    struct XenBlkDev *blkdev = container_of(xendev, struct XenBlkDev, xendev);

    qemu_bh_schedule(blkdev->bh);
}

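/* backend callbacks registered with the generic xen backend core */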
struct XenDevOps xen_blkdev_ops = {
    .size       = sizeof(struct XenBlkDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .alloc      = blk_alloc,
    .init       = blk_init,
    .initialise = blk_connect,
    .disconnect = blk_disconnect,
    .event      = blk_event,
    .free       = blk_free,
};